-rw-r--r--  Documentation/ABI/testing/sysfs-class-net | 8
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net-phydev | 36
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/b53.txt | 3
-rw-r--r--  Documentation/networking/checksum-offloads.txt | 11
-rw-r--r--  Documentation/networking/timestamping.txt | 26
-rw-r--r--  MAINTAINERS | 22
-rw-r--r--  arch/alpha/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/frv/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/m32r/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/mn10300/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/parisc/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/kmcent2.dts | 4
-rw-r--r--  arch/powerpc/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/s390/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/sparc/include/uapi/asm/socket.h | 2
-rw-r--r--  arch/xtensa/include/uapi/asm/socket.h | 2
-rw-r--r--  drivers/bluetooth/Kconfig | 3
-rw-r--r--  drivers/bluetooth/btintel.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 4
-rw-r--r--  drivers/bluetooth/btwilink.c | 1
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 40
-rw-r--r--  drivers/bluetooth/hci_ll.c | 1
-rw-r--r--  drivers/bluetooth/hci_uart.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 32
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 4
-rw-r--r--  drivers/net/can/m_can/m_can.c | 87
-rw-r--r--  drivers/net/dsa/Kconfig | 40
-rw-r--r--  drivers/net/dsa/Makefile | 6
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 11
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 4
-rw-r--r--  drivers/net/dsa/b53/b53_srab.c | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 5
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 3
-rw-r--r--  drivers/net/dsa/mt7530.c | 3
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Makefile | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 342
-rw-r--r--  drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 31
-rw-r--r--  drivers/net/dsa/mv88e6xxx/phy.c | 243
-rw-r--r--  drivers/net/dsa/mv88e6xxx/phy.h | 37
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 229
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 48
-rw-r--r--  drivers/net/dsa/qca8k.c | 5
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c | 138
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 188
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 70
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 74
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 12
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 110
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 77
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 56
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 25
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 18
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 93
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 25
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h | 12
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 34
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 27
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 107
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 1
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 260
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/jme.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/Makefile | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 202
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h | 99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw.h | 102
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c | 273
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c | 620
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h | 103
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h | 98
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c | 126
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h | 71
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 219
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1291
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 369
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 73
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 992
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1023
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 1891
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c | 57
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h | 56
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 181
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 29
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.h | 28
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 63
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 255
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 18
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 74
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 391
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.c | 140
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.h | 114
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 3326
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 136
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3539
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 267
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 28
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 27
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 31
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ooo.c | 30
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ooo.h | 26
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 186
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 35
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 37
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 54
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 62
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 43
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 73
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_roce.c | 4
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 26
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 26
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 54
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 1
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 1
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 12
-rw-r--r--  drivers/net/macsec.c | 15
-rw-r--r--  drivers/net/phy/broadcom.c | 30
-rw-r--r--  drivers/net/phy/marvell.c | 730
-rw-r--r--  drivers/net/phy/mdio-xgene.c | 74
-rw-r--r--  drivers/net/phy/mdio-xgene.h | 3
-rw-r--r--  drivers/net/phy/micrel.c | 27
-rw-r--r--  drivers/net/phy/microchip.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 13
-rw-r--r--  drivers/net/phy/smsc.c | 12
-rw-r--r--  drivers/net/tap.c | 25
-rw-r--r--  drivers/net/tun.c | 31
-rw-r--r--  drivers/net/usb/net1080.c | 9
-rw-r--r--  drivers/net/usb/r8152.c | 6
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.c | 92
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.h | 1
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 3
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 2
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 20
-rw-r--r--  drivers/scsi/qedi/qedi_fw_api.c | 3
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 3
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 2
-rw-r--r--  drivers/vhost/net.c | 128
-rw-r--r--  include/linux/if_bridge.h | 14
-rw-r--r--  include/linux/if_tap.h | 5
-rw-r--r--  include/linux/if_tun.h | 5
-rw-r--r--  include/linux/mlx5/device.h | 6
-rw-r--r--  include/linux/mlx5/driver.h | 11
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 11
-rw-r--r--  include/linux/mlx5/mlx5_ifc_fpga.h | 144
-rw-r--r--  include/linux/netdevice.h | 11
-rw-r--r--  include/linux/netlink.h | 5
-rw-r--r--  include/linux/phy.h | 17
-rw-r--r--  include/linux/ptr_ring.h | 120
-rw-r--r--  include/linux/qed/common_hsi.h | 209
-rw-r--r--  include/linux/qed/eth_common.h | 3
-rw-r--r--  include/linux/qed/fcoe_common.h | 1
-rw-r--r--  include/linux/qed/iscsi_common.h | 91
-rw-r--r--  include/linux/qed/qed_if.h | 33
-rw-r--r--  include/linux/qed/rdma_common.h | 2
-rw-r--r--  include/linux/qed/roce_common.h | 2
-rw-r--r--  include/linux/qed/tcp_common.h | 5
-rw-r--r--  include/linux/skb_array.h | 31
-rw-r--r--  include/linux/skbuff.h | 146
-rw-r--r--  include/linux/tcp.h | 24
-rw-r--r--  include/linux/udp.h | 3
-rw-r--r--  include/net/act_api.h | 13
-rw-r--r--  include/net/bluetooth/hci.h | 8
-rw-r--r--  include/net/dsa.h | 44
-rw-r--r--  include/net/dst.h | 2
-rw-r--r--  include/net/flow_dissector.h | 9
-rw-r--r--  include/net/inet_frag.h | 2
-rw-r--r--  include/net/ip6_fib.h | 3
-rw-r--r--  include/net/ip6_route.h | 2
-rw-r--r--  include/net/ip_fib.h | 4
-rw-r--r--  include/net/pkt_cls.h | 25
-rw-r--r--  include/net/pkt_sched.h | 3
-rw-r--r--  include/net/request_sock.h | 2
-rw-r--r--  include/net/route.h | 12
-rw-r--r--  include/net/sch_generic.h | 26
-rw-r--r--  include/net/sock.h | 13
-rw-r--r--  include/net/tcp.h | 77
-rw-r--r--  include/net/udp.h | 9
-rw-r--r--  include/net/udplite.h | 2
-rw-r--r--  include/soc/fsl/qe/qe.h | 9
-rw-r--r--  include/uapi/asm-generic/socket.h | 2
-rw-r--r--  include/uapi/linux/net_tstamp.h | 15
-rw-r--r--  include/uapi/linux/pkt_cls.h | 4
-rw-r--r--  include/uapi/linux/rtnetlink.h | 2
-rw-r--r--  net/bluetooth/af_bluetooth.c | 2
-rw-r--r--  net/bluetooth/ecdh_helper.c | 11
-rw-r--r--  net/bluetooth/hci_core.c | 46
-rw-r--r--  net/bridge/br_if.c | 2
-rw-r--r--  net/bridge/br_mdb.c | 4
-rw-r--r--  net/bridge/br_multicast.c | 8
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/bridge/br_private.h | 9
-rw-r--r--  net/bridge/br_stp_if.c | 11
-rw-r--r--  net/bridge/br_vlan.c | 8
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c | 5
-rw-r--r--  net/caif/caif_socket.c | 2
-rw-r--r--  net/core/datagram.c | 92
-rw-r--r--  net/core/dev.c | 90
-rw-r--r--  net/core/dev_ioctl.c | 1
-rw-r--r--  net/core/flow_dissector.c | 29
-rw-r--r--  net/core/net-sysfs.c | 8
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/core/skbuff.c | 30
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/dcb/dcbnl.c | 11
-rw-r--r--  net/dccp/ccids/ccid2.c | 8
-rw-r--r--  net/dccp/ccids/ccid2.h | 2
-rw-r--r--  net/dsa/Kconfig | 8
-rw-r--r--  net/dsa/Makefile | 8
-rw-r--r--  net/dsa/dsa.c | 20
-rw-r--r--  net/dsa/dsa2.c | 16
-rw-r--r--  net/dsa/dsa_priv.h | 101
-rw-r--r--  net/dsa/legacy.c | 12
-rw-r--r--  net/dsa/port.c | 259
-rw-r--r--  net/dsa/slave.c | 367
-rw-r--r--  net/dsa/switch.c | 174
-rw-r--r--  net/dsa/tag_brcm.c | 4
-rw-r--r--  net/dsa/tag_dsa.c | 2
-rw-r--r--  net/dsa/tag_edsa.c | 2
-rw-r--r--  net/dsa/tag_lan9303.c | 2
-rw-r--r--  net/dsa/tag_mtk.c | 2
-rw-r--r--  net/dsa/tag_qca.c | 4
-rw-r--r--  net/dsa/tag_trailer.c | 4
-rw-r--r--  net/ieee802154/socket.c | 10
-rw-r--r--  net/ipv4/fib_frontend.c | 18
-rw-r--r--  net/ipv4/fib_lookup.h | 3
-rw-r--r--  net/ipv4/fib_semantics.c | 133
-rw-r--r--  net/ipv4/fib_trie.c | 5
-rw-r--r--  net/ipv4/fou.c | 82
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_reject_ipv4.c | 2
-rw-r--r--  net/ipv4/route.c | 154
-rw-r--r--  net/ipv4/syncookies.c | 8
-rw-r--r--  net/ipv4/tcp.c | 15
-rw-r--r--  net/ipv4/tcp_bbr.c | 43
-rw-r--r--  net/ipv4/tcp_bic.c | 6
-rw-r--r--  net/ipv4/tcp_cubic.c | 14
-rw-r--r--  net/ipv4/tcp_htcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 163
-rw-r--r--  net/ipv4/tcp_ipv4.c | 16
-rw-r--r--  net/ipv4/tcp_lp.c | 17
-rw-r--r--  net/ipv4/tcp_metrics.c | 2
-rw-r--r--  net/ipv4/tcp_minisocks.c | 8
-rw-r--r--  net/ipv4/tcp_nv.c | 5
-rw-r--r--  net/ipv4/tcp_output.c | 131
-rw-r--r--  net/ipv4/tcp_rate.c | 16
-rw-r--r--  net/ipv4/tcp_recovery.c | 24
-rw-r--r--  net/ipv4/tcp_timer.c | 45
-rw-r--r--  net/ipv4/tcp_westwood.c | 6
-rw-r--r--  net/ipv4/udp.c | 156
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/fou6.c | 14
-rw-r--r--  net/ipv6/ila/ila_lwt.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 18
-rw-r--r--  net/ipv6/netfilter/nf_reject_ipv6.c | 3
-rw-r--r--  net/ipv6/route.c | 131
-rw-r--r--  net/ipv6/seg6.c | 4
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 4
-rw-r--r--  net/ipv6/udp.c | 3
-rw-r--r--  net/kcm/kcmsock.c | 2
-rw-r--r--  net/netfilter/nf_synproxy_core.c | 2
-rw-r--r--  net/nfc/af_nfc.c | 2
-rw-r--r--  net/openvswitch/datapath.c | 2
-rw-r--r--  net/packet/af_packet.c | 10
-rw-r--r--  net/rxrpc/Makefile | 1
-rw-r--r--  net/rxrpc/af_rxrpc.c | 35
-rw-r--r--  net/rxrpc/ar-internal.h | 65
-rw-r--r--  net/rxrpc/call_accept.c | 14
-rw-r--r--  net/rxrpc/call_object.c | 39
-rw-r--r--  net/rxrpc/conn_client.c | 153
-rw-r--r--  net/rxrpc/conn_object.c | 55
-rw-r--r--  net/rxrpc/conn_service.c | 11
-rw-r--r--  net/rxrpc/local_object.c | 48
-rw-r--r--  net/rxrpc/net_ns.c | 85
-rw-r--r--  net/rxrpc/peer_object.c | 26
-rw-r--r--  net/rxrpc/proc.c | 40
-rw-r--r--  net/sched/act_api.c | 55
-rw-r--r--  net/sched/act_csum.c | 1
-rw-r--r--  net/sched/cls_api.c | 425
-rw-r--r--  net/sched/cls_flower.c | 13
-rw-r--r--  net/sched/sch_api.c | 50
-rw-r--r--  net/sched/sch_atm.c | 29
-rw-r--r--  net/sched/sch_cbq.c | 21
-rw-r--r--  net/sched/sch_drr.c | 15
-rw-r--r--  net/sched/sch_dsmark.c | 17
-rw-r--r--  net/sched/sch_fq.c | 8
-rw-r--r--  net/sched/sch_fq_codel.c | 17
-rw-r--r--  net/sched/sch_hfsc.c | 21
-rw-r--r--  net/sched/sch_htb.c | 28
-rw-r--r--  net/sched/sch_ingress.c | 61
-rw-r--r--  net/sched/sch_multiq.c | 16
-rw-r--r--  net/sched/sch_prio.c | 19
-rw-r--r--  net/sched/sch_qfq.c | 16
-rw-r--r--  net/sched/sch_sfb.c | 17
-rw-r--r--  net/sched/sch_sfq.c | 17
-rw-r--r--  net/sctp/offload.c | 7
-rw-r--r--  net/sctp/output.c | 1
-rw-r--r--  net/socket.c | 49
-rwxr-xr-x  tools/hv/bondvf.sh | 21
401 files changed, 18162 insertions, 9137 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 668604fc8e06..6856da99b6f7 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -251,3 +251,11 @@ Contact: netdev@vger.kernel.org
Description:
Indicates the unique physical switch identifier of a switch this
port belongs to, as a string.
+
+What: /sys/class/net/<iface>/phydev
+Date: May 2017
+KernelVersion: 4.13
+Contact: netdev@vger.kernel.org
+Description:
+ Symbolic link to the PHY device this network device is attached
+ to.
diff --git a/Documentation/ABI/testing/sysfs-class-net-phydev b/Documentation/ABI/testing/sysfs-class-net-phydev
new file mode 100644
index 000000000000..c768d5fd8496
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-phydev
@@ -0,0 +1,36 @@
+What: /sys/class/mdio_bus/<bus>/<device>/attached_dev
+Date: May 2017
+KernelVersion: 4.13
+Contact: netdev@vger.kernel.org
+Description:
+ Symbolic link to the network device this PHY device is
+ attached to.
+
+What: /sys/class/mdio_bus/<bus>/<device>/phy_has_fixups
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ Boolean value indicating whether the PHY device has
+ any fixups registered against it (phy_register_fixup)
+
+What: /sys/class/mdio_bus/<bus>/<device>/phy_id
+Date: November 2012
+KernelVersion: 3.8
+Contact: netdev@vger.kernel.org
+Description:
+ 32-bit hexadecimal value corresponding to the PHY device's OUI,
+ model and revision number.
+
+What: /sys/class/mdio_bus/<bus>/<device>/phy_interface
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ String value indicating the PHY interface, possible
values are:
<empty> (not available), mii, gmii, sgmii, tbi, rev-mii,
rmii, rgmii, rgmii-id, rgmii-rxid, rgmii-txid, rtbi, smii,
+ xgmii, moca, qsgmii, trgmii, 1000base-x, 2500base-x, rxaui,
+ unknown
+
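For reference, a minimal userspace sketch (not part of this patch) that reads one of the attributes documented above. The bus and device names under /sys/class/mdio_bus are placeholders; substitute the ones present on the target system.

#include <stdio.h>

int main(void)
{
	/* Hypothetical bus/device pair: look under /sys/class/mdio_bus
	 * for the real names on your system.
	 */
	const char *path =
		"/sys/class/mdio_bus/fixed-0/fixed-0:00/phy_id";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("PHY OUI/model/revision: %s", buf);
	fclose(f);
	return 0;
}

The same pattern works for attached_dev (a symlink, so readlink() rather than fopen()) and the other attributes listed above.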
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index d6c6e41648d4..eb679e92d525 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -13,6 +13,9 @@ Required properties:
"brcm,bcm5397"
"brcm,bcm5398"
+ For the BCM11360 SoC, must be:
+ "brcm,bcm11360-srab" and the mandatory "brcm,cygnus-srab" string
+
For the BCM5310x SoCs with an integrated switch, must be one of:
"brcm,bcm53010-srab"
"brcm,bcm53011-srab"
diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt
index 56e36861245f..d52d191bbb0c 100644
--- a/Documentation/networking/checksum-offloads.txt
+++ b/Documentation/networking/checksum-offloads.txt
@@ -35,6 +35,9 @@ This interface only allows a single checksum to be offloaded. Where
encapsulation is used, the packet may have multiple checksum fields in
different header layers, and the rest will have to be handled by another
mechanism such as LCO or RCO.
+CRC32c can also be offloaded using this interface, by means of filling
+ skb->csum_start and skb->csum_offset as described above, and setting
+ skb->csum_not_inet: see skbuff.h comment (section 'D') for more details.
No offloading of the IP header checksum is performed; it is always done in
software. This is OK because when we build the IP header, we obviously
have it in cache, so summing it isn't expensive. It's also rather short.
@@ -49,9 +52,9 @@ A driver declares its offload capabilities in netdev->hw_features; see
and csum_offset given in the SKB; if it tries to deduce these itself in
hardware (as some NICs do) the driver should check that the values in the
SKB match those which the hardware will deduce, and if not, fall back to
- checksumming in software instead (with skb_checksum_help or one of the
- skb_csum_off_chk* functions as mentioned in include/linux/skbuff.h). This
- is a pain, but that's what you get when hardware tries to be clever.
+ checksumming in software instead (with skb_csum_hwoffload_help() or one of
+ the skb_checksum_help() / skb_crc32c_csum_help functions, as mentioned in
+ include/linux/skbuff.h).
The stack should, for the most part, assume that checksum offload is
supported by the underlying device. The only place that should check is
@@ -60,7 +63,7 @@ The stack should, for the most part, assume that checksum offload is
may include other offloads besides TX Checksum Offload) and, if they are
not supported or enabled on the device (determined by netdev->features),
performs the corresponding offload in software. In the case of TX
- Checksum Offload, that means calling skb_checksum_help(skb).
+ Checksum Offload, that means calling skb_csum_hwoffload_help(skb, features).
LCO: Local Checksum Offload
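As a rough sketch of the driver-side fallback the hunk above describes (not part of this patch): a transmit path that checks whether its hardware can offload the pending checksum and otherwise resolves it in software before handing the packet on. my_hw_can_csum() and its descriptor-width limit are hypothetical stand-ins for real hardware constraints.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical capability check: stands in for whatever limits the
 * real hardware has, e.g. a checksum offset field of limited width.
 */
static bool my_hw_can_csum(const struct sk_buff *skb)
{
	return skb->csum_offset <= 255;
}

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !my_hw_can_csum(skb) &&
	    skb_checksum_help(skb)) {
		/* Software fallback also failed: drop the packet. */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* ... queue skb to hardware as usual ... */
	return NETDEV_TX_OK;
}

On success skb_checksum_help() fills in the checksum and clears CHECKSUM_PARTIAL, so the packet can then be queued as if the hardware had never been asked.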
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 96f50694a748..196ba17cc344 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -193,6 +193,24 @@ SOF_TIMESTAMPING_OPT_STATS:
the transmit timestamps, such as how long a certain block of
data was limited by peer's receiver window.
+SOF_TIMESTAMPING_OPT_PKTINFO:
+
+ Enable the SCM_TIMESTAMPING_PKTINFO control message for incoming
+ packets with hardware timestamps. The message contains struct
+ scm_ts_pktinfo, which supplies the index of the real interface which
+ received the packet and its length at layer 2. A valid (non-zero)
+ interface index will be returned only if CONFIG_NET_RX_BUSY_POLL is
+ enabled and the driver is using NAPI. The struct contains also two
+ other fields, but they are reserved and undefined.
+
+SOF_TIMESTAMPING_OPT_TX_SWHW:
+
+ Request both hardware and software timestamps for outgoing packets
+ when SOF_TIMESTAMPING_TX_HARDWARE and SOF_TIMESTAMPING_TX_SOFTWARE
+ are enabled at the same time. If both timestamps are generated,
+ two separate messages will be looped to the socket's error queue,
+ each containing just one timestamp.
+
New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
regardless of the setting of sysctl net.core.tstamp_allow_data.
@@ -312,7 +330,7 @@ struct scm_timestamping {
};
The structure can return up to three timestamps. This is a legacy
-feature. Only one field is non-zero at any time. Most timestamps
+feature. At least one field is non-zero at any time. Most timestamps
are passed in ts[0]. Hardware timestamps are passed in ts[2].
ts[1] used to hold hardware timestamps converted to system time.
@@ -321,6 +339,12 @@ a HW PTP clock source, to allow time conversion in userspace and
optionally synchronize system time with a userspace PTP stack such
as linuxptp. For the PTP clock API, see Documentation/ptp/ptp.txt.
+Note that if the SO_TIMESTAMP or SO_TIMESTAMPNS option is enabled
+together with SO_TIMESTAMPING using SOF_TIMESTAMPING_SOFTWARE, a false
+software timestamp will be generated in the recvmsg() call and passed
+in ts[0] when a real software timestamp is missing. This happens also
+on hardware transmit timestamps.
+
2.1.1 Transmit timestamps with MSG_ERRQUEUE
For transmit timestamps the outgoing packet is looped back to the
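To make the two new SOF_TIMESTAMPING_OPT_* flags above concrete, a minimal userspace sketch enabling them alongside the usual generation and reporting flags; error handling and the surrounding socket setup are assumed.

#include <linux/net_tstamp.h>
#include <sys/socket.h>

static int enable_tstamps(int fd)
{
	int val = SOF_TIMESTAMPING_TX_HARDWARE |  /* generate hw TX stamps */
		  SOF_TIMESTAMPING_TX_SOFTWARE |  /* generate sw TX stamps */
		  SOF_TIMESTAMPING_RX_HARDWARE |  /* generate hw RX stamps */
		  SOF_TIMESTAMPING_RAW_HARDWARE | /* report raw hw time */
		  SOF_TIMESTAMPING_SOFTWARE |     /* report sw time */
		  SOF_TIMESTAMPING_OPT_PKTINFO |  /* SCM_TIMESTAMPING_PKTINFO cmsg */
		  SOF_TIMESTAMPING_OPT_TX_SWHW;   /* loop both TX stamps */

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &val, sizeof(val));
}

With OPT_PKTINFO set, a hardware-stamped receive also carries a struct scm_ts_pktinfo control message giving the real ingress interface index and layer-2 packet length; with OPT_TX_SWHW, a transmitted packet can produce two separate error-queue messages, one per timestamp, as described above.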
diff --git a/MAINTAINERS b/MAINTAINERS
index 9e984645c4b0..42378cf4b844 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -155,7 +155,7 @@ S: Maintained
F: drivers/scsi/53c700*
6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
-M: Alexander Aring <aar@pengutronix.de>
+M: Alexander Aring <alex.aring@gmail.com>
M: Jukka Rissanen <jukka.rissanen@linux.intel.com>
L: linux-bluetooth@vger.kernel.org
L: linux-wpan@vger.kernel.org
@@ -6427,7 +6427,7 @@ F: Documentation/cdrom/ide-cd
F: drivers/ide/ide-cd*
IEEE 802.15.4 SUBSYSTEM
-M: Alexander Aring <aar@pengutronix.de>
+M: Alexander Aring <alex.aring@gmail.com>
M: Stefan Schmidt <stefan@osg.samsung.com>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
@@ -8311,6 +8311,16 @@ W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_*
+MELLANOX ETHERNET INNOVA DRIVER
+M: Ilan Tayari <ilant@mellanox.com>
+R: Boris Pismenny <borisp@mellanox.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
+F: include/linux/mlx5/mlx5_ifc_fpga.h
+
MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@mellanox.com>
M: Ido Schimmel <idosch@mellanox.com>
@@ -8320,6 +8330,14 @@ W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
+MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
+M: Yotam Gigi <yotamg@mellanox.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+F: drivers/net/ethernet/mellanox/mlxfw/
+
MELLANOX MLXCPLD I2C AND MUX DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
M: Michael Shych <michaelsh@mellanox.com>
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 148d7a32754e..0926de63a62b 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -105,4 +105,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index 1ccf45657472..e491ff08b9a9 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -98,5 +98,7 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 2c3f4b48042a..869372413333 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -107,4 +107,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index ae6548d29a18..5d97890a8704 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -98,4 +98,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 3418ec9c1c50..365ff51f033a 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -116,4 +116,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 4526e92301a6..d013c0da0256 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -98,4 +98,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 514701840bd9..b893ca14fade 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
#define SO_COOKIE 0x4032
+#define SCM_TIMESTAMPING_PKTINFO 0x4033
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/powerpc/boot/dts/fsl/kmcent2.dts b/arch/powerpc/boot/dts/fsl/kmcent2.dts
index 47afa438602e..5922c1ea0e96 100644
--- a/arch/powerpc/boot/dts/fsl/kmcent2.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcent2.dts
@@ -293,9 +293,7 @@
compatible = "fsl,ucc-hdlc";
rx-clock-name = "clk9";
tx-clock-name = "clk9";
- fsl,tx-timeslot-mask = <0xfffffffe>;
- fsl,rx-timeslot-mask = <0xfffffffe>;
- fsl,siram-entry-id = <0>;
+ fsl,hdlc-bus;
};
};
};
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 58e2ec0310fc..bc4ca72faf99 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -105,4 +105,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index e8e5ecf673fd..fb9769d7e74e 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -104,4 +104,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 3f4ad19d9ec7..5d673302fd41 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -94,6 +94,8 @@
#define SO_COOKIE 0x003b
+#define SCM_TIMESTAMPING_PKTINFO 0x003c
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 1eb6d2fe70d3..982c2533f912 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -109,4 +109,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 737d93ef27c5..35952a94875e 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -97,6 +97,7 @@ config BT_HCIUART_NOKIA
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on PM
+ select BT_HCIUART_H4
help
Nokia H4+ is serial protocol for communication between Bluetooth
device and host. This protocol is required for Bluetooth devices
@@ -131,7 +132,7 @@ config BT_HCIUART_ATH3K
config BT_HCIUART_LL
bool "HCILL protocol support"
- depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
help
HCILL (HCI Low Level) is a serial protocol for communication
between Bluetooth device and host. This protocol is required for
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index fce154855718..d32e109bd5cb 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -575,3 +575,5 @@ MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/ibt-11-5.sfi");
MODULE_FIRMWARE("intel/ibt-11-5.ddc");
+MODULE_FIRMWARE("intel/ibt-12-16.sfi");
+MODULE_FIRMWARE("intel/ibt-12-16.ddc");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7fa373b428f8..278e81186150 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -336,6 +336,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL },
+ { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -2036,6 +2037,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
switch (ver.hw_variant) {
case 0x0b: /* SfP */
case 0x0c: /* WsP */
+ case 0x11: /* JfP */
case 0x12: /* ThP */
break;
default:
@@ -2138,6 +2140,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* Currently the supported hardware variants are:
* 11 (0x0b) for iBT3.0 (LnP/SfP)
* 12 (0x0c) for iBT3.5 (WsP)
+ * 17 (0x11) for iBT3.5 (JfP)
+ * 18 (0x12) for iBT3.5 (ThP)
*/
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
le16_to_cpu(ver.hw_variant),
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index b6bb58c41df5..85a3978b064f 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -262,7 +262,6 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
pkt_type = hci_skb_pkt_type(skb);
len = hst->st_write(skb);
if (len < 0) {
- kfree_skb(skb);
BT_ERR("ST write failed (%ld)", len);
/* Try Again, would only fail if UART has gone bad */
return -EAGAIN;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 2edd30556956..8397b716fa65 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -114,8 +114,12 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
struct sk_buff *skb = hu->tx_skb;
if (!skb) {
+ read_lock(&hu->proto_lock);
+
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
skb = hu->proto->dequeue(hu);
+
+ read_unlock(&hu->proto_lock);
} else {
hu->tx_skb = NULL;
}
@@ -125,18 +129,23 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
int hci_uart_tx_wakeup(struct hci_uart *hu)
{
+ read_lock(&hu->proto_lock);
+
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
- return 0;
+ goto no_schedule;
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
- return 0;
+ goto no_schedule;
}
BT_DBG("");
schedule_work(&hu->write_work);
+no_schedule:
+ read_unlock(&hu->proto_lock);
+
return 0;
}
EXPORT_SYMBOL_GPL(hci_uart_tx_wakeup);
@@ -237,9 +246,13 @@ static int hci_uart_flush(struct hci_dev *hdev)
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
+ read_lock(&hu->proto_lock);
+
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
hu->proto->flush(hu);
+ read_unlock(&hu->proto_lock);
+
return 0;
}
@@ -261,10 +274,15 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
skb->len);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ read_lock(&hu->proto_lock);
+
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ read_unlock(&hu->proto_lock);
return -EUNATCH;
+ }
hu->proto->enqueue(hu, skb);
+ read_unlock(&hu->proto_lock);
hci_uart_tx_wakeup(hu);
@@ -460,6 +478,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
+ rwlock_init(&hu->proto_lock);
+
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);
@@ -475,6 +495,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
{
struct hci_uart *hu = tty->disc_data;
struct hci_dev *hdev;
+ unsigned long flags;
BT_DBG("tty %p", tty);
@@ -490,7 +511,11 @@ static void hci_uart_tty_close(struct tty_struct *tty)
cancel_work_sync(&hu->write_work);
- if (test_and_clear_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ write_lock_irqsave(&hu->proto_lock, flags);
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ write_unlock_irqrestore(&hu->proto_lock, flags);
+
if (hdev) {
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
@@ -549,13 +574,18 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
if (!hu || tty != hu->tty)
return;
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ read_lock(&hu->proto_lock);
+
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ read_unlock(&hu->proto_lock);
return;
+ }
/* It does not need a lock here as it is already protected by a mutex in
* tty caller
*/
hu->proto->recv(hu, data, count);
+ read_unlock(&hu->proto_lock);
if (hu->hdev)
hu->hdev->stat.byte_rx += count;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index adc444f309a3..200288c87fc4 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -624,6 +624,7 @@ static int download_firmware(struct ll_device *lldev)
skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(lldev->hu.hdev, "send command failed\n");
+ err = PTR_ERR(skb);
goto out_rel_fw;
}
kfree_skb(skb);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 2b05e557fad0..c6e9e1cf63f8 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -87,6 +87,7 @@ struct hci_uart {
struct work_struct write_work;
const struct hci_uart_proto *proto;
+ rwlock_t proto_lock; /* Stop work for proto close */
void *priv;
struct sk_buff *tx_skb;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 94c049b62c2f..a384d72ea3cd 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -788,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
- *cqb = mlx5_vzalloc(*inlen);
+ *cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
goto err_db;
@@ -884,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
- *cqb = mlx5_vzalloc(*inlen);
+ *cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
goto err_buf;
@@ -1314,7 +1314,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto ex_resize;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index f1b56de64871..95db929bdc34 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -218,7 +218,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- out_cnt = mlx5_vzalloc(sz);
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
@@ -231,7 +231,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
(struct ib_pma_portcounters *)(out_mad->data + 40);
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- out_cnt = mlx5_vzalloc(sz);
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d45772da0963..42defaa0d6c6 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -60,8 +60,7 @@
#include "cmd.h"
#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE "Feb 2014"
+#define DRIVER_VERSION "5.0-0"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@ -70,7 +69,7 @@ MODULE_VERSION(DRIVER_VERSION);
static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
- DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
+ DRIVER_VERSION "\n";
enum {
MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
@@ -2263,7 +2262,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
if (!is_valid_attr(dev->mdev, flow_attr))
return ERR_PTR(-EINVAL);
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
handler = kzalloc(sizeof(*handler), GFP_KERNEL);
if (!handler || !spec) {
err = -ENOMEM;
@@ -3456,7 +3455,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
__be32 val;
int ret, i;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -3485,7 +3484,7 @@ static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
int ret, i;
int offset = port->cnts.num_q_counters;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 366433f71b58..763bb5b36144 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1110,7 +1110,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
sizeof(*pas) * ((npages + 1) / 2) * 2;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_1;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 93959e1e43a3..d17aad0f54c0 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -823,7 +823,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
- *in = mlx5_vzalloc(*inlen);
+ *in = kvzalloc(*inlen, GFP_KERNEL);
if (!*in) {
err = -ENOMEM;
goto err_umem;
@@ -931,7 +931,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
- *in = mlx5_vzalloc(*inlen);
+ *in = kvzalloc(*inlen, GFP_KERNEL);
if (!*in) {
err = -ENOMEM;
goto err_buf;
@@ -1060,7 +1060,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
return err;
inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_umem;
@@ -1140,7 +1140,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
u32 rq_pas_size = get_rq_pas_size(qpc);
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1193,7 +1193,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
int err;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1372,7 +1372,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1633,7 +1633,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (err)
return err;
} else {
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2164,7 +2164,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2189,7 +2189,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2434,7 +2434,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2479,7 +2479,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -4294,7 +4294,7 @@ static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
int err;
inlen = MLX5_ST_SZ_BYTES(query_sq_out);
- out = mlx5_vzalloc(inlen);
+ out = kvzalloc(inlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -4321,7 +4321,7 @@ static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
int err;
inlen = MLX5_ST_SZ_BYTES(query_rq_out);
- out = mlx5_vzalloc(inlen);
+ out = kvzalloc(inlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -4625,7 +4625,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
dev = to_mdev(pd->device);
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -4855,7 +4855,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
return ERR_PTR(-ENOMEM);
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err;
@@ -4934,7 +4934,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
return -EOPNOTSUPP;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 7cb145f9a6db..43707b101f47 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -127,7 +127,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_umem;
}
- in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
+ in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL);
if (!in->pas) {
err = -ENOMEM;
goto err_umem;
@@ -189,7 +189,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
}
mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
- in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages);
+ in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL);
if (!in->pas) {
err = -ENOMEM;
goto err_buf;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index bf8fdaeb955e..f4947a74b65f 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -621,10 +621,8 @@ static int __m_can_get_berr_counter(const struct net_device *dev,
return 0;
}
-static int m_can_get_berr_counter(const struct net_device *dev,
- struct can_berr_counter *bec)
+static int m_can_clk_start(struct m_can_priv *priv)
{
- struct m_can_priv *priv = netdev_priv(dev);
int err;
err = clk_prepare_enable(priv->hclk);
@@ -632,15 +630,31 @@ static int m_can_get_berr_counter(const struct net_device *dev,
return err;
err = clk_prepare_enable(priv->cclk);
- if (err) {
+ if (err)
clk_disable_unprepare(priv->hclk);
- return err;
- }
- __m_can_get_berr_counter(dev, bec);
+ return err;
+}
+static void m_can_clk_stop(struct m_can_priv *priv)
+{
clk_disable_unprepare(priv->cclk);
clk_disable_unprepare(priv->hclk);
+}
+
+static int m_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct m_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = m_can_clk_start(priv);
+ if (err)
+ return err;
+
+ __m_can_get_berr_counter(dev, bec);
+
+ m_can_clk_stop(priv);
return 0;
}
@@ -1276,19 +1290,15 @@ static int m_can_open(struct net_device *dev)
struct m_can_priv *priv = netdev_priv(dev);
int err;
- err = clk_prepare_enable(priv->hclk);
+ err = m_can_clk_start(priv);
if (err)
return err;
- err = clk_prepare_enable(priv->cclk);
- if (err)
- goto exit_disable_hclk;
-
/* open the can device */
err = open_candev(dev);
if (err) {
netdev_err(dev, "failed to open can device\n");
- goto exit_disable_cclk;
+ goto exit_disable_clks;
}
/* register interrupt handler */
@@ -1310,10 +1320,8 @@ static int m_can_open(struct net_device *dev)
exit_irq_fail:
close_candev(dev);
-exit_disable_cclk:
- clk_disable_unprepare(priv->cclk);
-exit_disable_hclk:
- clk_disable_unprepare(priv->hclk);
+exit_disable_clks:
+ m_can_clk_stop(priv);
return err;
}
@@ -1324,9 +1332,6 @@ static void m_can_stop(struct net_device *dev)
/* disable all interrupts */
m_can_disable_all_interrupts(priv);
- clk_disable_unprepare(priv->hclk);
- clk_disable_unprepare(priv->cclk);
-
/* set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
}
@@ -1338,6 +1343,7 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&priv->napi);
m_can_stop(dev);
+ m_can_clk_stop(priv);
free_irq(dev->irq, dev);
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
@@ -1489,11 +1495,23 @@ static int register_m_can_dev(struct net_device *dev)
return register_candev(dev);
}
+static void m_can_init_ram(struct m_can_priv *priv)
+{
+ int end, i, start;
+
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = priv->mcfg[MRAM_SIDF].off;
+ end = priv->mcfg[MRAM_TXB].off +
+ priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
+ for (i = start; i < end; i += 4)
+ writel(0x0, priv->mram_base + i);
+}
+
static void m_can_of_parse_mram(struct m_can_priv *priv,
const u32 *mram_config_vals)
{
- int i, start, end;
-
priv->mcfg[MRAM_SIDF].off = mram_config_vals[0];
priv->mcfg[MRAM_SIDF].num = mram_config_vals[1];
priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
@@ -1529,15 +1547,7 @@ static void m_can_of_parse_mram(struct m_can_priv *priv,
priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
- /* initialize the entire Message RAM in use to avoid possible
- * ECC/parity checksum errors when reading an uninitialized buffer
- */
- start = priv->mcfg[MRAM_SIDF].off;
- end = priv->mcfg[MRAM_TXB].off +
- priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
- for (i = start; i < end; i += 4)
- writel(0x0, priv->mram_base + i);
-
+ m_can_init_ram(priv);
}
static int m_can_plat_probe(struct platform_device *pdev)
@@ -1658,6 +1668,8 @@ failed_ret:
return ret;
}
+/* TODO: runtime PM with power down or sleep mode */
+
static __maybe_unused int m_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1666,10 +1678,10 @@ static __maybe_unused int m_can_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
+ m_can_stop(ndev);
+ m_can_clk_stop(priv);
}
- /* TODO: enter low power */
-
priv->can.state = CAN_STATE_SLEEPING;
return 0;
@@ -1680,11 +1692,18 @@ static __maybe_unused int m_can_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct m_can_priv *priv = netdev_priv(ndev);
- /* TODO: exit low power */
+ m_can_init_ram(priv);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
+ int ret;
+
+ ret = m_can_clk_start(priv);
+ if (ret)
+ return ret;
+
+ m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 862ee22303c2..68131a45ac5e 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,13 +1,7 @@
menu "Distributed Switch Architecture drivers"
depends on HAVE_NET_DSA
-config NET_DSA_MV88E6060
- tristate "Marvell 88E6060 ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_TAG_TRAILER
- ---help---
- This enables support for the Marvell 88E6060 ethernet switch
- chip.
+source "drivers/net/dsa/b53/Kconfig"
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
@@ -21,19 +15,6 @@ config NET_DSA_BCM_SF2
This enables support for the Broadcom Starfighter 2 Ethernet
switch chips.
-source "drivers/net/dsa/b53/Kconfig"
-
-source "drivers/net/dsa/mv88e6xxx/Kconfig"
-
-config NET_DSA_QCA8K
- tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
- depends on NET_DSA
- select NET_DSA_TAG_QCA
- select REGMAP
- ---help---
- This enables support for the Qualcomm Atheros QCA8K Ethernet
- switch chips.
-
config NET_DSA_LOOP
tristate "DSA mock-up Ethernet switch chip support"
depends on NET_DSA
@@ -50,6 +31,25 @@ config NET_DSA_MT7530
This enables support for the Mediatek MT7530 Ethernet switch
chip.
+config NET_DSA_MV88E6060
+ tristate "Marvell 88E6060 ethernet switch chip support"
+ depends on NET_DSA
+ select NET_DSA_TAG_TRAILER
+ ---help---
+ This enables support for the Marvell 88E6060 ethernet switch
+ chip.
+
+source "drivers/net/dsa/mv88e6xxx/Kconfig"
+
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ depends on NET_DSA
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ ---help---
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index edd630361736..9613f36083a6 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,11 +1,11 @@
-obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
-obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
obj-y += b53/
obj-y += mv88e6xxx/
-obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index fa0eece21eef..fa099ed41652 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -29,7 +29,6 @@
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <net/dsa.h>
-#include <net/switchdev.h>
#include "b53_regs.h"
#include "b53_priv.h"
@@ -1056,7 +1055,7 @@ EXPORT_SYMBOL(b53_vlan_del);
int b53_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct b53_device *dev = ds->priv;
u16 vid, vid_start = 0, pvid;
@@ -1285,7 +1284,7 @@ static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
static int b53_fdb_copy(struct net_device *dev, int port,
const struct b53_arl_entry *ent,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
if (!ent->is_valid)
return 0;
@@ -1302,7 +1301,7 @@ static int b53_fdb_copy(struct net_device *dev, int port,
int b53_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct b53_device *priv = ds->priv;
struct net_device *dev = ds->ports[port].netdev;
@@ -1344,7 +1343,7 @@ EXPORT_SYMBOL(b53_fdb_dump);
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
- s8 cpu_port = ds->dst->cpu_port;
+ s8 cpu_port = ds->dst->cpu_dp->index;
u16 pvlan, reg;
unsigned int i;
@@ -1390,7 +1389,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
- s8 cpu_port = ds->dst->cpu_port;
+ s8 cpu_port = ds->dst->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index a9dc90a01438..155a9c48c317 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -395,7 +395,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
int b53_fdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
@@ -406,7 +406,7 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb);
int b53_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
void b53_mirror_del(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 8a62b6a69703..c37ffd1b6833 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -364,6 +364,7 @@ static const struct of_device_id b53_srab_of_match[] = {
{ .compatible = "brcm,bcm53018-srab" },
{ .compatible = "brcm,bcm53019-srab" },
{ .compatible = "brcm,bcm5301x-srab" },
+ { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
@@ -371,6 +372,7 @@ static const struct of_device_id b53_srab_of_match[] = {
{ .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,cygnus-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
{ /* sentinel */ },
};
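Both new compatibles carry BCM58XX_DEVICE_ID in their .data member, the standard way an of_device_id row attaches a per-variant constant. A sketch of how such a tag is commonly retrieved at probe time (of_match_device() is real kernel API; the wrapper function is illustrative):

#include <linux/of.h>
#include <linux/of_device.h>

/* Illustrative probe fragment: recover the device ID stashed in
 * .data for whichever compatible matched. */
static u32 example_get_devid(struct device *dev)
{
	const struct of_device_id *of_id =
		of_match_device(b53_srab_of_match, dev);

	return of_id ? (u32)(unsigned long)of_id->data : 0;
}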
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2be963252ca5..687a8bae5d73 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -28,7 +28,6 @@
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
-#include <net/switchdev.h>
#include <linux/platform_data/b53.h>
#include "bcm_sf2.h"
@@ -228,7 +227,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->dst[ds->index].cpu_port;
+ s8 cpu_port = ds->dst->cpu_dp->index;
unsigned int i;
u32 reg;
@@ -832,7 +831,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
{
struct net_device *p = ds->dst[ds->index].master_netdev;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->dst[ds->index].cpu_port;
+ s8 cpu_port = ds->dst->cpu_dp->index;
struct ethtool_wolinfo pwol;
p->ethtool_ops->get_wol(p, &pwol);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index a19e1781e9bb..5edf07beb9d2 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -17,7 +17,6 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/if_bridge.h>
-#include <net/switchdev.h>
#include <net/dsa.h>
#include "dsa_loop.h"
@@ -188,7 +187,7 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port,
static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct dsa_loop_priv *ps = ds->priv;
struct mii_bus *bus = ps->bus;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index b070c167e70f..4d2f45153ede 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -28,7 +28,6 @@
#include <linux/reset.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
-#include <net/switchdev.h>
#include "mt7530.h"
@@ -854,7 +853,7 @@ mt7530_port_fdb_del(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mt7530_priv *priv = ds->priv;
struct mt7530_fdb _fdb = { 0 };
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 5934b7a4c448..dce7fa57eb55 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -176,7 +176,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
(dsa_is_cpu_port(ds, p) ?
ds->enabled_port_mask :
- BIT(ds->dst->cpu_port)));
+ BIT(ds->dst->cpu_dp->index)));
/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 6edd869c8d6f..5cd5551461e3 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -4,4 +4,6 @@ mv88e6xxx-objs += global1.o
mv88e6xxx-objs += global1_atu.o
mv88e6xxx-objs += global1_vtu.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
+mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
+mv88e6xxx-objs += serdes.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index d034d8cd7d22..c2f38f6770aa 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -32,12 +32,13 @@
#include <linux/gpio/consumer.h>
#include <linux/phy.h>
#include <net/dsa.h>
-#include <net/switchdev.h>
#include "mv88e6xxx.h"
#include "global1.h"
#include "global2.h"
+#include "phy.h"
#include "port.h"
+#include "serdes.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
{
@@ -222,21 +223,7 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
return 0;
}
-static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val)
-{
- return mv88e6xxx_read(chip, addr, reg, val);
-}
-
-static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val)
-{
- return mv88e6xxx_write(chip, addr, reg, val);
-}
-
-static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
@@ -248,106 +235,6 @@ static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
return mdio_bus->bus;
}
-static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
- int reg, u16 *val)
-{
- int addr = phy; /* PHY devices addresses start at 0x0 */
- struct mii_bus *bus;
-
- bus = mv88e6xxx_default_mdio_bus(chip);
- if (!bus)
- return -EOPNOTSUPP;
-
- if (!chip->info->ops->phy_read)
- return -EOPNOTSUPP;
-
- return chip->info->ops->phy_read(chip, bus, addr, reg, val);
-}
-
-static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
- int reg, u16 val)
-{
- int addr = phy; /* PHY devices addresses start at 0x0 */
- struct mii_bus *bus;
-
- bus = mv88e6xxx_default_mdio_bus(chip);
- if (!bus)
- return -EOPNOTSUPP;
-
- if (!chip->info->ops->phy_write)
- return -EOPNOTSUPP;
-
- return chip->info->ops->phy_write(chip, bus, addr, reg, val);
-}
-
-static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
-{
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_PHY_PAGE))
- return -EOPNOTSUPP;
-
- return mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page);
-}
-
-static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
-{
- int err;
-
- /* Restore PHY page Copper 0x0 for access via the registered MDIO bus */
- err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, PHY_PAGE_COPPER);
- if (unlikely(err)) {
- dev_err(chip->dev, "failed to restore PHY %d page Copper (%d)\n",
- phy, err);
- }
-}
-
-static int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
- u8 page, int reg, u16 *val)
-{
- int err;
-
- /* There is no paging for registers 22 */
- if (reg == PHY_PAGE)
- return -EINVAL;
-
- err = mv88e6xxx_phy_page_get(chip, phy, page);
- if (!err) {
- err = mv88e6xxx_phy_read(chip, phy, reg, val);
- mv88e6xxx_phy_page_put(chip, phy);
- }
-
- return err;
-}
-
-static int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
- u8 page, int reg, u16 val)
-{
- int err;
-
- /* There is no paging for registers 22 */
- if (reg == PHY_PAGE)
- return -EINVAL;
-
- err = mv88e6xxx_phy_page_get(chip, phy, page);
- if (!err) {
- err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page);
- mv88e6xxx_phy_page_put(chip, phy);
- }
-
- return err;
-}
-
-static int mv88e6xxx_serdes_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
-{
- return mv88e6xxx_phy_page_read(chip, ADDR_SERDES, SERDES_PAGE_FIBER,
- reg, val);
-}
-
-static int mv88e6xxx_serdes_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
-{
- return mv88e6xxx_phy_page_write(chip, ADDR_SERDES, SERDES_PAGE_FIBER,
- reg, val);
-}
-
static void mv88e6xxx_g1_irq_mask(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
@@ -561,122 +448,6 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
return mv88e6xxx_write(chip, addr, reg, val);
}
-static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
-{
- if (!chip->info->ops->ppu_disable)
- return 0;
-
- return chip->info->ops->ppu_disable(chip);
-}
-
-static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
-{
- if (!chip->info->ops->ppu_enable)
- return 0;
-
- return chip->info->ops->ppu_enable(chip);
-}
-
-static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
-{
- struct mv88e6xxx_chip *chip;
-
- chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
-
- mutex_lock(&chip->reg_lock);
-
- if (mutex_trylock(&chip->ppu_mutex)) {
- if (mv88e6xxx_ppu_enable(chip) == 0)
- chip->ppu_disabled = 0;
- mutex_unlock(&chip->ppu_mutex);
- }
-
- mutex_unlock(&chip->reg_lock);
-}
-
-static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
-{
- struct mv88e6xxx_chip *chip = (void *)_ps;
-
- schedule_work(&chip->ppu_work);
-}
-
-static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip)
-{
- int ret;
-
- mutex_lock(&chip->ppu_mutex);
-
- /* If the PHY polling unit is enabled, disable it so that
- * we can access the PHY registers. If it was already
- * disabled, cancel the timer that is going to re-enable
- * it.
- */
- if (!chip->ppu_disabled) {
- ret = mv88e6xxx_ppu_disable(chip);
- if (ret < 0) {
- mutex_unlock(&chip->ppu_mutex);
- return ret;
- }
- chip->ppu_disabled = 1;
- } else {
- del_timer(&chip->ppu_timer);
- ret = 0;
- }
-
- return ret;
-}
-
-static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip)
-{
- /* Schedule a timer to re-enable the PHY polling unit. */
- mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
- mutex_unlock(&chip->ppu_mutex);
-}
-
-static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip)
-{
- mutex_init(&chip->ppu_mutex);
- INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work);
- setup_timer(&chip->ppu_timer, mv88e6xxx_ppu_reenable_timer,
- (unsigned long)chip);
-}
-
-static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip)
-{
- del_timer_sync(&chip->ppu_timer);
-}
-
-static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val)
-{
- int err;
-
- err = mv88e6xxx_ppu_access_get(chip);
- if (!err) {
- err = mv88e6xxx_read(chip, addr, reg, val);
- mv88e6xxx_ppu_access_put(chip);
- }
-
- return err;
-}
-
-static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val)
-{
- int err;
-
- err = mv88e6xxx_ppu_access_get(chip);
- if (!err) {
- err = mv88e6xxx_write(chip, addr, reg, val);
- mv88e6xxx_ppu_access_put(chip);
- }
-
- return err;
-}
-
static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
int link, int speed, int duplex,
phy_interface_t mode)
@@ -1269,7 +1040,7 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry next = {
@@ -1700,7 +1471,7 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
u16 fid, u16 vid, int port,
struct switchdev_obj *obj,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_atu_entry addr;
int err;
@@ -1755,7 +1526,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
struct switchdev_obj *obj,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_vtu_entry vlan = {
.vid = chip->info->max_vid,
@@ -1792,7 +1563,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -1951,24 +1722,6 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
-static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
-{
- u16 val;
- int err;
-
- /* Clear Power Down bit */
- err = mv88e6xxx_serdes_read(chip, MII_BMCR, &val);
- if (err)
- return err;
-
- if (val & BMCR_PDOWN) {
- val &= ~BMCR_PDOWN;
- err = mv88e6xxx_serdes_write(chip, MII_BMCR, val);
- }
-
- return err;
-}
-
static int mv88e6xxx_set_port_mode(struct mv88e6xxx_chip *chip, int port,
enum mv88e6xxx_frame_mode frame, u16 egress,
u16 etype)
@@ -2050,6 +1803,21 @@ static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
return 0;
}
+static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
+ bool on)
+{
+ int err = 0;
+
+ if (chip->info->ops->serdes_power) {
+ err = chip->info->ops->serdes_power(chip, port, on);
+ if (err)
+ dev_err(chip->dev,
+ "Failed to change SERDES power: %d\n", err);
+ }
+
+ return err;
+}
+
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
@@ -2100,21 +1868,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- /* If this port is connected to a SerDes, make sure the SerDes is not
- * powered down.
+ /* Enable the SERDES interface for DSA and CPU ports. On user
+ * ports the SERDES is powered up when the port itself is
+ * enabled, saving a bit of power.
*/
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SERDES)) {
- err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ err = mv88e6xxx_serdes_power(chip, port, true);
if (err)
return err;
- reg &= PORT_STATUS_CMODE_MASK;
- if ((reg == PORT_STATUS_CMODE_100BASE_X) ||
- (reg == PORT_STATUS_CMODE_1000BASE_X) ||
- (reg == PORT_STATUS_CMODE_SGMII)) {
- err = mv88e6xxx_serdes_power_on(chip);
- if (err < 0)
- return err;
- }
}
/* Port Control 2: don't force a good FCS, set the maximum frame size to
@@ -2217,6 +1978,29 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, 0x0000);
}
+static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_serdes_power(chip, port, true);
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_serdes_power(chip, port, false);
+ mutex_unlock(&chip->reg_lock);
+}
+
static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
{
int err;
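The two port callbacks just added complement the setup-time policy: CPU and DSA ports get their SERDES powered during setup, while user ports defer it to these hooks. For orientation (an assumption about the DSA core of this period, not part of this patch), the core invokes them from the slave netdev's open and stop paths, giving roughly this lifecycle:

/* Approximate call flow assumed here:
 *
 *   ip link set swpN up   -> dsa_slave_open()  -> ds->ops->port_enable()
 *                                              -> SERDES powered on
 *   ip link set swpN down -> dsa_slave_close() -> ds->ops->port_disable()
 *                                              -> SERDES powered off
 */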
@@ -2880,6 +2664,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6352_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -2944,6 +2729,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6352_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -3003,6 +2789,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -3035,6 +2822,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -3067,6 +2855,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -3100,6 +2889,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6352_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -3133,6 +2923,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -3322,6 +3113,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_power = mv88e6352_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -3357,6 +3149,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -3391,6 +3184,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .serdes_power = mv88e6390_serdes_power,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -3915,18 +3709,6 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
return chip;
}
-static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
-{
- if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
- mv88e6xxx_ppu_state_init(chip);
-}
-
-static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
-{
- if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
- mv88e6xxx_ppu_state_destroy(chip);
-}
-
static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
struct mii_bus *bus, int sw_addr)
{
@@ -4038,7 +3820,7 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_mdb *mdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -4059,6 +3841,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
+ .port_enable = mv88e6xxx_port_enable,
+ .port_disable = mv88e6xxx_port_disable,
.set_eee = mv88e6xxx_set_eee,
.get_eee = mv88e6xxx_get_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 77236cd72df2..9087cb009cc3 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -37,9 +37,6 @@
#define PHY_PAGE 0x16
#define PHY_PAGE_COPPER 0x00
-#define ADDR_SERDES 0x0f
-#define SERDES_PAGE_FIBER 0x01
-
#define PORT_STATUS 0x00
#define PORT_STATUS_PAUSE_EN BIT(15)
#define PORT_STATUS_MY_PAUSE BIT(14)
@@ -511,14 +508,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */
MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */
- /* PHY Registers.
- */
- MV88E6XXX_CAP_PHY_PAGE, /* (0x16) Page Register */
-
- /* Fiber/SERDES Registers (SMI address F).
- */
- MV88E6XXX_CAP_SERDES,
-
/* Switch Global (1) Registers.
*/
MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */
@@ -553,10 +542,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD)
#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA)
-#define MV88E6XXX_FLAG_PHY_PAGE BIT_ULL(MV88E6XXX_CAP_PHY_PAGE)
-
-#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
-
#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
@@ -577,11 +562,6 @@ enum mv88e6xxx_cap {
(MV88E6XXX_FLAG_SMI_CMD | \
MV88E6XXX_FLAG_SMI_DATA)
-/* Fiber/SERDES Registers at SMI address F, page 1 */
-#define MV88E6XXX_FLAGS_SERDES \
- (MV88E6XXX_FLAG_PHY_PAGE | \
- MV88E6XXX_FLAG_SERDES)
-
#define MV88E6XXX_FLAGS_FAMILY_6095 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
@@ -629,8 +609,7 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_INT | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_SERDES)
+ MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6351 \
(MV88E6XXX_FLAG_G1_VTU_FID | \
@@ -651,8 +630,7 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAGS_IRL | \
- MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_SERDES)
+ MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6390 \
(MV88E6XXX_FLAG_EEE | \
@@ -884,6 +862,9 @@ struct mv88e6xxx_ops {
/* Can be either in g1 or g2, so don't use a prefix */
int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
+ /* Power on/off a SERDES interface */
+ int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on);
+
/* VLAN Translation Unit operations */
int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
@@ -942,5 +923,5 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 update);
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask);
-
+struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip);
#endif
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
new file mode 100644
index 000000000000..0d3e8aaedf50
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -0,0 +1,243 @@
+/*
+ * Marvell 88e6xxx Ethernet switch PHY and PPU support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <net/dsa.h>
+
+#include "mv88e6xxx.h"
+#include "phy.h"
+
+int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, int reg, u16 *val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_read(chip, bus, addr, reg, val);
+}
+
+int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
+{
+ int addr = phy; /* PHY device addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_write(chip, bus, addr, reg, val);
+}
+
+static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
+{
+ return mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page);
+}
+
+static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy)
+{
+ int err;
+
+ /* Restore PHY page Copper 0x0 for access via the registered
+ * MDIO bus
+ */
+ err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, PHY_PAGE_COPPER);
+ if (unlikely(err)) {
+ dev_err(chip->dev,
+ "failed to restore PHY %d page Copper (%d)\n",
+ phy, err);
+ }
+}
+
+int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 *val)
+{
+ int err;
+
+ /* There is no paging for register 22 */
+ if (reg == PHY_PAGE)
+ return -EINVAL;
+
+ err = mv88e6xxx_phy_page_get(chip, phy, page);
+ if (!err) {
+ err = mv88e6xxx_phy_read(chip, phy, reg, val);
+ mv88e6xxx_phy_page_put(chip, phy);
+ }
+
+ return err;
+}
+
+int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 val)
+{
+ int err;
+
+ /* There is no paging for register 22 */
+ if (reg == PHY_PAGE)
+ return -EINVAL;
+
+ err = mv88e6xxx_phy_page_get(chip, phy, page);
+ if (!err) {
+ err = mv88e6xxx_phy_write(chip, phy, reg, val);
+ mv88e6xxx_phy_page_put(chip, phy);
+ }
+
+ return err;
+}
+
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
+{
+ if (!chip->info->ops->ppu_disable)
+ return 0;
+
+ return chip->info->ops->ppu_disable(chip);
+}
+
+int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
+{
+ if (!chip->info->ops->ppu_enable)
+ return 0;
+
+ return chip->info->ops->ppu_enable(chip);
+}
+
+static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
+{
+ struct mv88e6xxx_chip *chip;
+
+ chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
+
+ mutex_lock(&chip->reg_lock);
+
+ if (mutex_trylock(&chip->ppu_mutex)) {
+ if (mv88e6xxx_ppu_enable(chip) == 0)
+ chip->ppu_disabled = 0;
+ mutex_unlock(&chip->ppu_mutex);
+ }
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
+{
+ struct mv88e6xxx_chip *chip = (void *)_ps;
+
+ schedule_work(&chip->ppu_work);
+}
+
+static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ mutex_lock(&chip->ppu_mutex);
+
+ /* If the PHY polling unit is enabled, disable it so that
+ * we can access the PHY registers. If it was already
+ * disabled, cancel the timer that is going to re-enable
+ * it.
+ */
+ if (!chip->ppu_disabled) {
+ ret = mv88e6xxx_ppu_disable(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->ppu_mutex);
+ return ret;
+ }
+ chip->ppu_disabled = 1;
+ } else {
+ del_timer(&chip->ppu_timer);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip)
+{
+ /* Schedule a timer to re-enable the PHY polling unit. */
+ mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
+ mutex_unlock(&chip->ppu_mutex);
+}
+
+static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip)
+{
+ mutex_init(&chip->ppu_mutex);
+ INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work);
+ setup_timer(&chip->ppu_timer, mv88e6xxx_ppu_reenable_timer,
+ (unsigned long)chip);
+}
+
+static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip)
+{
+ del_timer_sync(&chip->ppu_timer);
+}
+
+int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ int err;
+
+ err = mv88e6xxx_ppu_access_get(chip);
+ if (!err) {
+ err = mv88e6xxx_read(chip, addr, reg, val);
+ mv88e6xxx_ppu_access_put(chip);
+ }
+
+ return err;
+}
+
+int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ int err;
+
+ err = mv88e6xxx_ppu_access_get(chip);
+ if (!err) {
+ err = mv88e6xxx_write(chip, addr, reg, val);
+ mv88e6xxx_ppu_access_put(chip);
+ }
+
+ return err;
+}
+
+void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
+ mv88e6xxx_ppu_state_init(chip);
+}
+
+void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->ppu_enable && chip->info->ops->ppu_disable)
+ mv88e6xxx_ppu_state_destroy(chip);
+}
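The new phy.c gathers two access disciplines: page get/put for paged PHY registers, and PPU get/put for chips whose PHY polling unit must be paused around direct accesses. The timer-plus-work indirection in the PPU path exists because the 10 ms re-enable fires in timer (atomic) context, where the register mutex cannot be taken, so the timer only schedules the work item. A usage sketch for the paged helpers follows; the page argument and register are illustrative, the helpers are the ones defined above.

#include <linux/mii.h>

/* Read BMSR from a given page; mv88e6xxx_phy_page_read() selects the
 * page, performs the read, and restores the copper page afterwards. */
static int example_paged_bmsr(struct mv88e6xxx_chip *chip, int phy, u8 page)
{
	u16 bmsr;
	int err;

	err = mv88e6xxx_phy_page_read(chip, phy, page, MII_BMSR, &bmsr);
	if (err)
		return err;

	return !!(bmsr & BMSR_LSTATUS);	/* 1 if link up on that page */
}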
diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h
new file mode 100644
index 000000000000..0961d781b726
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/phy.h
@@ -0,0 +1,37 @@
+/*
+ * Marvell 88E6xxx PHY access
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_PHY_H
+#define _MV88E6XXX_PHY_H
+
+int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6165_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
+ int reg, u16 *val);
+int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
+ int reg, u16 val);
+int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 *val);
+int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
+ u8 page, int reg, u16 val);
+int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip);
+
+#endif /* _MV88E6XXX_PHY_H */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
new file mode 100644
index 000000000000..53795676bd70
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -0,0 +1,229 @@
+/*
+ * Marvell 88E6xxx SERDES manipulation, via SMI bus
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/mii.h>
+
+#include "global2.h"
+#include "mv88e6xxx.h"
+#include "phy.h"
+#include "port.h"
+#include "serdes.h"
+
+static int mv88e6352_serdes_read(struct mv88e6xxx_chip *chip, int reg,
+ u16 *val)
+{
+ return mv88e6xxx_phy_page_read(chip, MV88E6352_ADDR_SERDES,
+ MV88E6352_SERDES_PAGE_FIBER,
+ reg, val);
+}
+
+static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
+ u16 val)
+{
+ return mv88e6xxx_phy_page_write(chip, MV88E6352_ADDR_SERDES,
+ MV88E6352_SERDES_PAGE_FIBER,
+ reg, val);
+}
+
+static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
+{
+ u16 val, new_val;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (on)
+ new_val = val & ~BMCR_PDOWN;
+ else
+ new_val = val | BMCR_PDOWN;
+
+ if (val != new_val)
+ err = mv88e6352_serdes_write(chip, MII_BMCR, new_val);
+
+ return err;
+}
+
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ int err;
+ u8 cmode;
+
+ err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
+ if (err)
+ return err;
+
+ if ((cmode == PORT_STATUS_CMODE_100BASE_X) ||
+ (cmode == PORT_STATUS_CMODE_1000BASE_X) ||
+ (cmode == PORT_STATUS_CMODE_SGMII)) {
+ err = mv88e6352_serdes_power_set(chip, on);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
+static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on)
+{
+ u16 val, new_val;
+ int reg_c45;
+ int err;
+
+ reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE |
+ MV88E6390_PCS_CONTROL_1;
+ err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
+ if (err)
+ return err;
+
+ if (on)
+ new_val = val & ~(MV88E6390_PCS_CONTROL_1_RESET |
+ MV88E6390_PCS_CONTROL_1_LOOPBACK |
+ MV88E6390_PCS_CONTROL_1_PDOWN);
+ else
+ new_val = val | MV88E6390_PCS_CONTROL_1_PDOWN;
+
+ if (val != new_val)
+ err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val);
+
+ return err;
+}
+
+/* Set the power on/off for SGMII and 1000BASE-X */
+static int mv88e6390_serdes_sgmii(struct mv88e6xxx_chip *chip, int addr,
+ bool on)
+{
+ u16 val, new_val;
+ int reg_c45;
+ int err;
+
+ reg_c45 = MII_ADDR_C45 | MV88E6390_SERDES_DEVICE |
+ MV88E6390_SGMII_CONTROL;
+ err = mv88e6xxx_phy_read(chip, addr, reg_c45, &val);
+ if (err)
+ return err;
+
+ if (on)
+ new_val = val & ~(MV88E6390_SGMII_CONTROL_RESET |
+ MV88E6390_SGMII_CONTROL_LOOPBACK |
+ MV88E6390_SGMII_CONTROL_PDOWN);
+ else
+ new_val = val | MV88E6390_SGMII_CONTROL_PDOWN;
+
+ if (val != new_val)
+ err = mv88e6xxx_phy_write(chip, addr, reg_c45, new_val);
+
+ return err;
+}
+
+static int mv88e6390_serdes_lower(struct mv88e6xxx_chip *chip, u8 cmode,
+ int port_donor, int lane, bool rxaui, bool on)
+{
+ int err;
+ u8 cmode_donor;
+
+ err = mv88e6xxx_port_get_cmode(chip, port_donor, &cmode_donor);
+ if (err)
+ return err;
+
+ switch (cmode_donor) {
+ case PORT_STATUS_CMODE_RXAUI:
+ if (!rxaui)
+ break;
+ /* Fall through */
+ case PORT_STATUS_CMODE_1000BASE_X:
+ case PORT_STATUS_CMODE_SGMII:
+ case PORT_STATUS_CMODE_2500BASEX:
+ if (cmode == PORT_STATUS_CMODE_1000BASE_X ||
+ cmode == PORT_STATUS_CMODE_SGMII)
+ return mv88e6390_serdes_sgmii(chip, lane, on);
+ }
+ return 0;
+}
+
+static int mv88e6390_serdes_port9(struct mv88e6xxx_chip *chip, u8 cmode,
+ bool on)
+{
+ switch (cmode) {
+ case PORT_STATUS_CMODE_1000BASE_X:
+ case PORT_STATUS_CMODE_SGMII:
+ return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT9_LANE0, on);
+ case PORT_STATUS_CMODE_XAUI:
+ case PORT_STATUS_CMODE_RXAUI:
+ case PORT_STATUS_CMODE_2500BASEX:
+ return mv88e6390_serdes_10g(chip, MV88E6390_PORT9_LANE0, on);
+ }
+
+ return 0;
+}
+
+static int mv88e6390_serdes_port10(struct mv88e6xxx_chip *chip, u8 cmode,
+ bool on)
+{
+ switch (cmode) {
+ case PORT_STATUS_CMODE_SGMII:
+ return mv88e6390_serdes_sgmii(chip, MV88E6390_PORT10_LANE0, on);
+ case PORT_STATUS_CMODE_XAUI:
+ case PORT_STATUS_CMODE_RXAUI:
+ case PORT_STATUS_CMODE_1000BASE_X:
+ case PORT_STATUS_CMODE_2500BASEX:
+ return mv88e6390_serdes_10g(chip, MV88E6390_PORT10_LANE0, on);
+ }
+
+ return 0;
+}
+
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ u8 cmode;
+ int err;
+
+ err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
+ if (err)
+ return err;
+
+ switch (port) {
+ case 2:
+ return mv88e6390_serdes_lower(chip, cmode, 9,
+ MV88E6390_PORT9_LANE1,
+ false, on);
+ case 3:
+ return mv88e6390_serdes_lower(chip, cmode, 9,
+ MV88E6390_PORT9_LANE2,
+ true, on);
+ case 4:
+ return mv88e6390_serdes_lower(chip, cmode, 9,
+ MV88E6390_PORT9_LANE3,
+ true, on);
+ case 5:
+ return mv88e6390_serdes_lower(chip, cmode, 10,
+ MV88E6390_PORT10_LANE1,
+ false, on);
+ case 6:
+ return mv88e6390_serdes_lower(chip, cmode, 10,
+ MV88E6390_PORT10_LANE2,
+ true, on);
+ case 7:
+ return mv88e6390_serdes_lower(chip, cmode, 10,
+ MV88E6390_PORT10_LANE3,
+ true, on);
+ case 9:
+ return mv88e6390_serdes_port9(chip, cmode, on);
+ case 10:
+ return mv88e6390_serdes_port10(chip, cmode, on);
+ }
+
+ return 0;
+}
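The port-to-lane switch above encodes the 6390 family's lane sharing. A compact reading of it (an inference from this code, not quoted from a datasheet):

/*
 * Lane ownership as implied by mv88e6390_serdes_power():
 *
 *   port 9:  lane0 0x09, lane1 0x12, lane2 0x13, lane3 0x14
 *   port 10: lane0 0x0a, lane1 0x15, lane2 0x16, lane3 0x17
 *
 * Ports 2-4 borrow port 9's lanes 1-3; ports 5-7 borrow port 10's.
 * A donor running RXAUI occupies two lanes, leaving only lanes 2
 * and 3 free, which is why the rxaui argument is false precisely
 * for the two ports mapped onto lane 1.
 */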
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
new file mode 100644
index 000000000000..eb3ceaef790f
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -0,0 +1,48 @@
+/*
+ * Marvell 88E6xxx SERDES manipulation, via SMI bus
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_SERDES_H
+#define _MV88E6XXX_SERDES_H
+
+#include "mv88e6xxx.h"
+
+#define MV88E6352_ADDR_SERDES 0x0f
+#define MV88E6352_SERDES_PAGE_FIBER 0x01
+
+#define MV88E6390_PORT9_LANE0 0x09
+#define MV88E6390_PORT9_LANE1 0x12
+#define MV88E6390_PORT9_LANE2 0x13
+#define MV88E6390_PORT9_LANE3 0x14
+#define MV88E6390_PORT10_LANE0 0x0a
+#define MV88E6390_PORT10_LANE1 0x15
+#define MV88E6390_PORT10_LANE2 0x16
+#define MV88E6390_PORT10_LANE3 0x17
+#define MV88E6390_SERDES_DEVICE (4 << 16)
+
+/* 10GBASE-R and 10GBASE-X4/X2 */
+#define MV88E6390_PCS_CONTROL_1 0x1000
+#define MV88E6390_PCS_CONTROL_1_RESET BIT(15)
+#define MV88E6390_PCS_CONTROL_1_LOOPBACK BIT(14)
+#define MV88E6390_PCS_CONTROL_1_SPEED BIT(13)
+#define MV88E6390_PCS_CONTROL_1_PDOWN BIT(11)
+
+/* 1000BASE-X and SGMII */
+#define MV88E6390_SGMII_CONTROL 0x2000
+#define MV88E6390_SGMII_CONTROL_RESET BIT(15)
+#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14)
+#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11)
+
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
+
+#endif
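For concreteness, here is the clause-45 address the 6390 code assembles from these constants, assuming the conventional encoding of MII_ADDR_C45 in include/linux/mdio.h (bit 30 flags a clause-45 access, bits 20:16 carry the MMD device, bits 15:0 the register):

/* Worked example: address passed to mv88e6xxx_phy_read() for the 10G
 * PCS control register of a lane.
 *
 *   MII_ADDR_C45            = 0x40000000  (clause-45 flag, bit 30)
 *   MV88E6390_SERDES_DEVICE = 4 << 16     = 0x00040000  (MMD 4)
 *   MV88E6390_PCS_CONTROL_1 =               0x00001000
 *   --------------------------------------------------
 *   reg_c45                 =               0x40041000
 */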
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index a4fd4ccf7b67..0f6a011d8ed1 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -18,7 +18,6 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
-#include <net/switchdev.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/if_bridge.h>
@@ -507,7 +506,7 @@ qca8k_setup(struct dsa_switch *ds)
pr_warn("regmap initialization failed");
/* Initialize CPU port pad mode (xMII type, delays...) */
- phy_mode = of_get_phy_mode(ds->ports[ds->dst->cpu_port].dn);
+ phy_mode = of_get_phy_mode(ds->dst->cpu_dp->dn);
if (phy_mode < 0) {
pr_err("Can't find phy-mode for master device\n");
return phy_mode;
@@ -873,7 +872,7 @@ qca8k_port_fdb_del(struct dsa_switch *ds, int port,
static int
qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+ switchdev_obj_dump_cb_t *cb)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
struct qca8k_fdb _fdb = { 0 };
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index db02bc2fb4b2..05d9d3e2e92e 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -723,6 +723,12 @@ static int ax_init_dev(struct net_device *dev)
ax->plat->mac_addr)
memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ eth_hw_addr_random(dev);
+ dev_info(&dev->dev, "Using random MAC address: %pM\n",
+ dev->dev_addr);
+ }
+
ax_reset_8390(dev);
ei_local->name = "AX88796";
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c772420fa41c..5a2ad9c5faab 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1268,6 +1268,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
case HWTSTAMP_FILTER_NONE:
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
@@ -1390,8 +1391,7 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
- if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
}
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 28fdedc30b74..559963b1aa32 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -23,9 +23,17 @@
struct xgene_gstrings_stats {
char name[ETH_GSTRING_LEN];
int offset;
+ u32 addr;
+ u32 mask;
};
-#define XGENE_STAT(m) { #m, offsetof(struct xgene_enet_pdata, stats.m) }
+#define XGENE_STAT(m) { #m, offsetof(struct rtnl_link_stats64, m) }
+#define XGENE_EXTD_STAT(s, a, m) \
+ { \
+ .name = #s, \
+ .addr = a ## _ADDR, \
+ .mask = m \
+ }
static const struct xgene_gstrings_stats gstrings_stats[] = {
XGENE_STAT(rx_packets),
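Ahead of the extended-statistics table added below, a worked expansion of the new macro; token pasting turns the bare register suffix into its _ADDR constant, and .mask records the counter width in bits (turned into GENMASK(mask - 1, 0) when the counters are read):

/* XGENE_EXTD_STAT(rx_fcs_error_cntr, RFCS, 16) expands to: */
{
	.name = "rx_fcs_error_cntr",
	.addr = RFCS_ADDR,
	.mask = 16,	/* width in bits, not a bitmask */
},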
@@ -40,7 +48,65 @@ static const struct xgene_gstrings_stats gstrings_stats[] = {
XGENE_STAT(rx_fifo_errors)
};
+static const struct xgene_gstrings_stats gstrings_extd_stats[] = {
+ XGENE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64, 31),
+ XGENE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127, 31),
+ XGENE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255, 31),
+ XGENE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511, 31),
+ XGENE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K, 31),
+ XGENE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX, 31),
+ XGENE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV, 31),
+ XGENE_EXTD_STAT(rx_fcs_error_cntr, RFCS, 16),
+ XGENE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA, 31),
+ XGENE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA, 31),
+ XGENE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF, 16),
+ XGENE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF, 16),
+ XGENE_EXTD_STAT(rx_unk_opcode_cntr, RXUO, 16),
+ XGENE_EXTD_STAT(rx_align_err_cntr, RALN, 16),
+ XGENE_EXTD_STAT(rx_frame_len_err_cntr, RFLR, 16),
+ XGENE_EXTD_STAT(rx_frame_len_err_recov_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(rx_code_err_cntr, RCDE, 16),
+ XGENE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE, 16),
+ XGENE_EXTD_STAT(rx_undersize_pkt_cntr, RUND, 16),
+ XGENE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR, 16),
+ XGENE_EXTD_STAT(rx_fragments_cntr, RFRG, 16),
+ XGENE_EXTD_STAT(rx_jabber_cntr, RJBR, 16),
+ XGENE_EXTD_STAT(rx_jabber_recov_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP, 16),
+ XGENE_EXTD_STAT(rx_overrun_cntr, DUMP, 0),
+ XGENE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA, 31),
+ XGENE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA, 31),
+ XGENE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF, 16),
+ XGENE_EXTD_STAT(tx_defer_pkt_cntr, TDFR, 31),
+ XGENE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF, 31),
+ XGENE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL, 31),
+ XGENE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL, 31),
+ XGENE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL, 31),
+ XGENE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL, 31),
+ XGENE_EXTD_STAT(tx_total_col_cntr, TNCL, 31),
+ XGENE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH, 16),
+ XGENE_EXTD_STAT(tx_drop_frame_cntr, TDRP, 16),
+ XGENE_EXTD_STAT(tx_jabber_frame_cntr, TJBR, 12),
+ XGENE_EXTD_STAT(tx_fcs_error_cntr, TFCS, 12),
+ XGENE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF, 12),
+ XGENE_EXTD_STAT(tx_oversize_frame_cntr, TOVR, 12),
+ XGENE_EXTD_STAT(tx_undersize_frame_cntr, TUND, 12),
+ XGENE_EXTD_STAT(tx_fragments_cntr, TFRG, 12),
+ XGENE_EXTD_STAT(tx_underrun_cntr, DUMP, 0)
+};
+
#define XGENE_STATS_LEN ARRAY_SIZE(gstrings_stats)
+#define XGENE_EXTD_STATS_LEN ARRAY_SIZE(gstrings_extd_stats)
+#define RFCS_IDX 7
+#define RALN_IDX 13
+#define RFLR_IDX 14
+#define FALSE_RFLR_IDX 15
+#define RUND_IDX 18
+#define FALSE_RJBR_IDX 22
+#define RX_OVERRUN_IDX 24
+#define TFCS_IDX 38
+#define TFRG_IDX 42
+#define TX_UNDERRUN_IDX 43
static void xgene_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
@@ -61,7 +127,7 @@ static int xgene_get_link_ksettings(struct net_device *ndev,
struct phy_device *phydev = ndev->phydev;
u32 supported;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode)) {
if (phydev == NULL)
return -ENODEV;
@@ -111,7 +177,7 @@ static int xgene_set_link_ksettings(struct net_device *ndev,
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode)) {
if (!phydev)
return -ENODEV;
@@ -142,6 +208,11 @@ static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
+ memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
}
static int xgene_get_sset_count(struct net_device *ndev, int sset)
@@ -149,18 +220,71 @@ static int xgene_get_sset_count(struct net_device *ndev, int sset)
if (sset != ETH_SS_STATS)
return -EINVAL;
- return XGENE_STATS_LEN;
+ return XGENE_STATS_LEN + XGENE_EXTD_STATS_LEN;
+}
+
+static void xgene_get_extd_stats(struct xgene_enet_pdata *pdata)
+{
+ u32 rx_drop, tx_drop;
+ u32 mask, tmp;
+ int i;
+
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
+ tmp = xgene_enet_rd_stat(pdata, gstrings_extd_stats[i].addr);
+ if (gstrings_extd_stats[i].mask) {
+ mask = GENMASK(gstrings_extd_stats[i].mask - 1, 0);
+ pdata->extd_stats[i] += (tmp & mask);
+ }
+ }
+
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ /* Errata 10GE_10 - SW should interpret RALN as 0 */
+ pdata->extd_stats[RALN_IDX] = 0;
+ } else {
+ /* Errata ENET_15 - Fixes RFCS, RFLR, TFCS counter */
+ pdata->extd_stats[RFCS_IDX] -= pdata->extd_stats[RALN_IDX];
+ pdata->extd_stats[RFLR_IDX] -= pdata->extd_stats[RUND_IDX];
+ pdata->extd_stats[TFCS_IDX] -= pdata->extd_stats[TFRG_IDX];
+ }
+
+ pdata->mac_ops->get_drop_cnt(pdata, &rx_drop, &tx_drop);
+ pdata->extd_stats[RX_OVERRUN_IDX] += rx_drop;
+ pdata->extd_stats[TX_UNDERRUN_IDX] += tx_drop;
+
+ /* Errata 10GE_8 - Update Frame recovered from Errata 10GE_8/ENET_11 */
+ pdata->extd_stats[FALSE_RFLR_IDX] = pdata->false_rflr;
+ /* Errata ENET_15 - Jabber frame recovered from Errata 10GE_10/ENET_15 */
+ pdata->extd_stats[FALSE_RJBR_IDX] = pdata->vlan_rjbr;
+}
+
+int xgene_extd_stats_init(struct xgene_enet_pdata *pdata)
+{
+ pdata->extd_stats = devm_kcalloc(&pdata->pdev->dev,
+ XGENE_EXTD_STATS_LEN, sizeof(u64), GFP_KERNEL);
+ if (!pdata->extd_stats)
+ return -ENOMEM;
+
+ xgene_get_extd_stats(pdata);
+ memset(pdata->extd_stats, 0, XGENE_EXTD_STATS_LEN * sizeof(u64));
+
+ return 0;
}
static void xgene_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *dummy,
u64 *data)
{
- void *pdata = netdev_priv(ndev);
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct rtnl_link_stats64 stats;
int i;
+ dev_get_stats(ndev, &stats);
for (i = 0; i < XGENE_STATS_LEN; i++)
- *data++ = *(u64 *)(pdata + gstrings_stats[i].offset);
+ data[i] = *(u64 *)((char *)&stats + gstrings_stats[i].offset);
+
+ xgene_get_extd_stats(pdata);
+ for (i = 0; i < XGENE_EXTD_STATS_LEN; i++)
+ data[i + XGENE_STATS_LEN] = pdata->extd_stats[i];
}
static void xgene_get_pauseparam(struct net_device *ndev,
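Two details of the stats plumbing above are easy to miss. First, xgene_extd_stats_init() reads every counter once and then zeroes the array: the initial read flushes the hardware's clear-on-read counters so pre-driver history never reaches user space. Second, each raw CSR value is masked to the counter's architected width before being accumulated into the u64 totals. A self-contained user-space rendition of that masking (GENMASK re-derived here, since the kernel macro is not available outside the tree):

#include <stdint.h>
#include <stdio.h>

/* Same result as the kernel's GENMASK(h, l) for 32-bit operands. */
#define GENMASK32(h, l) ((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))

int main(void)
{
	uint32_t raw = 0xdead0042;	/* hypothetical CSR read-back */
	uint64_t total = 0;

	/* A 16-bit counter (.mask = 16) keeps only GENMASK(15, 0). */
	total += raw & GENMASK32(15, 0);
	printf("accumulated 0x%llx\n", (unsigned long long)total); /* 0x42 */
	return 0;
}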
@@ -180,7 +304,7 @@ static int xgene_set_pauseparam(struct net_device *ndev,
struct phy_device *phydev = ndev->phydev;
u32 oldadv, newadv;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (!phydev)
return -EINVAL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 2a835e07adfb..e45b587c2994 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -205,30 +205,24 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
}
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
- struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
{
switch (status) {
case INGRESS_CRC:
ring->rx_crc_errors++;
- ring->rx_dropped++;
break;
case INGRESS_CHECKSUM:
case INGRESS_CHECKSUM_COMPUTE:
ring->rx_errors++;
- ring->rx_dropped++;
break;
case INGRESS_TRUNC_FRAME:
ring->rx_frame_errors++;
- ring->rx_dropped++;
break;
case INGRESS_PKT_LEN:
ring->rx_length_errors++;
- ring->rx_dropped++;
break;
case INGRESS_PKT_UNDER:
ring->rx_frame_errors++;
- ring->rx_dropped++;
break;
case INGRESS_FIFO_OVERRUN:
ring->rx_fifo_errors++;
@@ -270,42 +264,39 @@ static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
iowrite32(val, addr);
}
-static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
- void __iomem *cmd, void __iomem *cmd_done,
- u32 wr_addr, u32 wr_data)
+void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data)
{
- u32 done;
+ void __iomem *addr, *wr, *cmd, *cmd_done;
+ struct net_device *ndev = pdata->ndev;
u8 wait = 10;
+ u32 done;
+
+ if (pdata->mdio_driver && ndev->phydev &&
+ phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+ struct mii_bus *bus = ndev->phydev->mdio.bus;
+
+ return xgene_mdio_wr_mac(bus->priv, wr_addr, wr_data);
+ }
+
+ addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+ wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
+ cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ spin_lock(&pdata->mac_lock);
iowrite32(wr_addr, addr);
iowrite32(wr_data, wr);
iowrite32(XGENE_ENET_WR_CMD, cmd);
- /* wait for write command to complete */
while (!(done = ioread32(cmd_done)) && wait--)
udelay(1);
if (!done)
- return false;
+ netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n",
+ wr_addr, wr_data);
iowrite32(0, cmd);
-
- return true;
-}
-
-static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
- u32 wr_addr, u32 wr_data)
-{
- void __iomem *addr, *wr, *cmd, *cmd_done;
-
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
-
- if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
- netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
- wr_addr);
+ spin_unlock(&pdata->mac_lock);
}
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
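Two shape changes land in this hunk: the one-shot indirect helper is folded into xgene_enet_wr_mac() itself, and the shared address/data/command/done window is now serialized with a spinlock, since the ethtool stats path can race the regular configuration path for it. When an external MDIO driver owns an RGMII MAC, the write is delegated to xgene_mdio_wr_mac() instead. The register dance, in outline:

/*
 * Indirect MAC register write, as implemented above:
 *
 *   1. spin_lock(&pdata->mac_lock)        serialize the shared window
 *   2. iowrite32(wr_addr, ADDR)           latch the target register
 *   3. iowrite32(wr_data, WRITE)          latch the payload
 *   4. iowrite32(XGENE_ENET_WR_CMD, CMD)  kick the command
 *   5. poll CMD_DONE, up to 10 x udelay(1)
 *   6. iowrite32(0, CMD)                  re-arm for the next access
 *   7. spin_unlock(&pdata->mac_lock)
 */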
@@ -332,42 +323,69 @@ static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
*val = ioread32(addr);
}
-static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
- void __iomem *cmd, void __iomem *cmd_done,
- u32 rd_addr, u32 *rd_data)
+u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
- u32 done;
+ void __iomem *addr, *rd, *cmd, *cmd_done;
+ struct net_device *ndev = pdata->ndev;
+ u32 done, rd_data;
u8 wait = 10;
+ if (pdata->mdio_driver && ndev->phydev &&
+ phy_interface_mode_is_rgmii(pdata->phy_mode)) {
+ struct mii_bus *bus = ndev->phydev->mdio.bus;
+
+ return xgene_mdio_rd_mac(bus->priv, rd_addr);
+ }
+
+ addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+ rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
+ cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+ spin_lock(&pdata->mac_lock);
iowrite32(rd_addr, addr);
iowrite32(XGENE_ENET_RD_CMD, cmd);
- /* wait for read command to complete */
while (!(done = ioread32(cmd_done)) && wait--)
udelay(1);
if (!done)
- return false;
+ netdev_err(ndev, "mac read failed, addr: %04x\n", rd_addr);
- *rd_data = ioread32(rd);
+ rd_data = ioread32(rd);
iowrite32(0, cmd);
+ spin_unlock(&pdata->mac_lock);
- return true;
+ return rd_data;
}
-static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
- u32 rd_addr, u32 *rd_data)
+u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
void __iomem *addr, *rd, *cmd, *cmd_done;
+ u32 done, rd_data;
+ u8 wait = 10;
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET;
+ rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET;
+ cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET;
+
+ spin_lock(&pdata->stats_lock);
+ iowrite32(rd_addr, addr);
+ iowrite32(XGENE_ENET_RD_CMD, cmd);
- if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
- netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
+ while (!(done = ioread32(cmd_done)) && wait--)
+ udelay(1);
+
+ if (!done)
+ netdev_err(pdata->ndev, "mac stats read failed, addr: %04x\n",
rd_addr);
+
+ rd_data = ioread32(rd);
+ iowrite32(0, cmd);
+ spin_unlock(&pdata->stats_lock);
+
+ return rd_data;
}
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
@@ -379,8 +397,8 @@ static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
(dev_addr[1] << 8) | dev_addr[0];
addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
- xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
- xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
+ xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0);
+ xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1);
}
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
@@ -405,8 +423,8 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
@@ -456,8 +474,8 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
- xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
+ mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR);
+ intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR);
xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);
switch (pdata->phy_speed) {
@@ -495,8 +513,8 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
}
mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
- xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
+ xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
xgene_enet_configure_clock(pdata);
@@ -506,7 +524,7 @@ static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
- xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
+ xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}
static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
@@ -528,14 +546,14 @@ static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
if (enable)
data |= TX_FLOW_EN;
else
data &= ~TX_FLOW_EN;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
pdata->mac_ops->enable_tx_pause(pdata, enable);
}
@@ -544,14 +562,14 @@ static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
if (enable)
data |= RX_FLOW_EN;
else
data &= ~RX_FLOW_EN;
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
}
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
@@ -565,9 +583,9 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
xgene_gmac_set_mac_addr(pdata);
/* Adjust MDC clock frequency */
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
+ value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&value, 7);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);
+ xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value);
/* Enable drop if bufpool not available */
xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
@@ -600,6 +618,18 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}
+static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 count;
+
+ xgene_enet_rd_mcx_csr(pdata, ICM_ECM_DROP_COUNT_REG0_ADDR, &count);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+ xgene_enet_rd_mcx_csr(pdata, ECM_CONFIG0_REG_0_ADDR, &count);
+}
+
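The get_drop_cnt callbacks added for the GMAC, SGMAC and XGMAC paths all
follow one pattern: read the combined ICM/ECM drop-count CSR, split it with
ICM_DROP_COUNT()/ECM_DROP_COUNT(), then issue a second CSR read to work
around errata 10GE_4 (the counter is not clear-on-read; the companion
config-register read clears it). Below is a minimal sketch of a consumer,
assuming the new stats_lock serializes the read and that the values are
folded into FIFO-error counters; the real consumer is the extended-stats
code, which this excerpt does not show:

static void xgene_enet_read_drop_cnt(struct xgene_enet_pdata *pdata,
				     struct rtnl_link_stats64 *stats)
{
	u32 rx_drop, tx_drop;

	spin_lock(&pdata->stats_lock);
	pdata->mac_ops->get_drop_cnt(pdata, &rx_drop, &tx_drop);
	spin_unlock(&pdata->stats_lock);

	/* the errata workaround read clears the counter, so each call
	 * returns the drops accumulated since the previous call
	 */
	stats->rx_fifo_errors += rx_drop;
	stats->tx_fifo_errors += tx_drop;
}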
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
u32 val = 0xffffffff;
@@ -637,32 +667,32 @@ static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}
static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}
static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}
static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
+ data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
+ xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
@@ -733,27 +763,6 @@ static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < pdata->rxq_cnt; i++) {
- ring = pdata->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = pdata->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
-
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < pdata->txq_cnt; i++) {
- ring = pdata->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(pdata->clk))
@@ -1009,6 +1018,7 @@ const struct xgene_mac_ops xgene_gmac_ops = {
.tx_enable = xgene_gmac_tx_enable,
.rx_disable = xgene_gmac_rx_disable,
.tx_disable = xgene_gmac_tx_disable,
+ .get_drop_cnt = xgene_gmac_get_drop_cnt,
.set_speed = xgene_gmac_set_speed,
.set_mac_addr = xgene_gmac_set_mac_addr,
.set_framesize = xgene_enet_set_frame_size,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index d250bfe94d24..5d3e18d3c94c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -115,6 +115,7 @@ enum xgene_enet_rm {
#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
#define BLOCK_ETH_MAC_OFFSET 0x0000
+#define BLOCK_ETH_STATS_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
#define CLKEN_ADDR 0xc208
@@ -126,6 +127,12 @@ enum xgene_enet_rm {
#define MAC_READ_REG_OFFSET 0x0c
#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+#define STAT_ADDR_REG_OFFSET 0x14
+#define STAT_COMMAND_REG_OFFSET 0x18
+#define STAT_WRITE_REG_OFFSET 0x1c
+#define STAT_READ_REG_OFFSET 0x20
+#define STAT_COMMAND_DONE_REG_OFFSET 0x24
+
#define PCS_ADDR_REG_OFFSET 0x00
#define PCS_COMMAND_REG_OFFSET 0x04
#define PCS_WRITE_REG_OFFSET 0x08
@@ -185,6 +192,10 @@ enum xgene_enet_rm {
#define CFG_CLE_NXTFPSEL0(val) (((val) << 20) & GENMASK(23, 20))
#define ICM_CONFIG0_REG_0_ADDR 0x0400
#define ICM_CONFIG2_REG_0_ADDR 0x0410
+#define ECM_CONFIG0_REG_0_ADDR 0x0500
+#define ECM_CONFIG0_REG_1_ADDR 0x0504
+#define ICM_ECM_DROP_COUNT_REG0_ADDR 0x0508
+#define ICM_ECM_DROP_COUNT_REG1_ADDR 0x050c
#define RX_DV_GATE_REG_0_ADDR 0x05fc
#define TX_DV_GATE_EN0 BIT(2)
#define RX_DV_GATE_EN0 BIT(1)
@@ -217,12 +228,53 @@ enum xgene_enet_rm {
#define FULL_DUPLEX2 BIT(0)
#define PAD_CRC BIT(2)
#define LENGTH_CHK BIT(4)
-#define SCAN_AUTO_INCR BIT(5)
-#define TBYT_ADDR 0x38
-#define TPKT_ADDR 0x39
-#define TDRP_ADDR 0x45
-#define TFCS_ADDR 0x47
-#define TUND_ADDR 0x4a
+
+#define TR64_ADDR 0x20
+#define TR127_ADDR 0x21
+#define TR255_ADDR 0x22
+#define TR511_ADDR 0x23
+#define TR1K_ADDR 0x24
+#define TRMAX_ADDR 0x25
+#define TRMGV_ADDR 0x26
+
+#define RFCS_ADDR 0x29
+#define RMCA_ADDR 0x2a
+#define RBCA_ADDR 0x2b
+#define RXCF_ADDR 0x2c
+#define RXPF_ADDR 0x2d
+#define RXUO_ADDR 0x2e
+#define RALN_ADDR 0x2f
+#define RFLR_ADDR 0x30
+#define RCDE_ADDR 0x31
+#define RCSE_ADDR 0x32
+#define RUND_ADDR 0x33
+#define ROVR_ADDR 0x34
+#define RFRG_ADDR 0x35
+#define RJBR_ADDR 0x36
+#define RDRP_ADDR 0x37
+
+#define TMCA_ADDR 0x3a
+#define TBCA_ADDR 0x3b
+#define TXPF_ADDR 0x3c
+#define TDFR_ADDR 0x3d
+#define TEDF_ADDR 0x3e
+#define TSCL_ADDR 0x3f
+#define TMCL_ADDR 0x40
+#define TLCL_ADDR 0x41
+#define TXCL_ADDR 0x42
+#define TNCL_ADDR 0x43
+#define TPFH_ADDR 0x44
+#define TDRP_ADDR 0x45
+#define TJBR_ADDR 0x46
+#define TFCS_ADDR 0x47
+#define TXCF_ADDR 0x48
+#define TOVR_ADDR 0x49
+#define TUND_ADDR 0x4a
+#define TFRG_ADDR 0x4b
+#define DUMP_ADDR 0x27
+
+#define ECM_DROP_COUNT(src) xgene_get_bits(src, 0, 15)
+#define ICM_DROP_COUNT(src) xgene_get_bits(src, 16, 31)
#define TSO_IPPROTO_TCP 1
@@ -380,14 +432,16 @@ static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
}
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
- struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
-
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
int xgene_enet_phy_connect(struct net_device *ndev);
void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata);
+u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr);
+void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr,
+ u32 wr_data);
+u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr);
extern const struct xgene_mac_ops xgene_gmac_ops;
extern const struct xgene_port_ops xgene_gport_ops;
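The three prototypes added above replace per-file copies of the indirect
MAC accessors that are removed from xgene_enet_sgmac.c and
xgene_enet_xgmac.c later in this diff. A sketch of what the shared read
side might look like, modeled on the removed duplicates; serializing with
the new mac_lock is an assumption based on the spin_lock_init() added in
xgene_enet_probe(), and xgene_enet_rd_stat() would follow the same shape
against the new STAT_*_REG_OFFSET block and mcx_stats_addr:

u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;
	u32 done, rd_data = 0;
	u8 wait = 10;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->mac_lock);
	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* poll for command completion, as the removed helpers did */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (done)
		rd_data = ioread32(rd);
	else
		netdev_err(pdata->ndev, "mac read failed, addr: %04x\n",
			   rd_addr);

	iowrite32(0, cmd);
	spin_unlock(&pdata->mac_lock);

	return rd_data;
}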
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 5f37ed3506d5..d3906f6b01bd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -246,9 +246,9 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
skb_frag_t *frag;
dma_addr_t *frag_dma_addr;
u16 skb_index;
- u8 status;
- int i, ret = 0;
u8 mss_index;
+ u8 status;
+ int i;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
@@ -275,19 +275,17 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
- xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
- status);
- ret = -EIO;
+ cp_ring->tx_dropped++;
+ cp_ring->tx_errors++;
}
if (likely(skb)) {
dev_kfree_skb_any(skb);
} else {
netdev_err(cp_ring->ndev, "completion skb is NULL\n");
- ret = -EIO;
}
- return ret;
+ return 0;
}
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
@@ -658,6 +656,18 @@ static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
buf_pool->head = head;
}
+/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
+static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
+{
+ if (status == INGRESS_CRC &&
+ len >= (ETHER_STD_PACKET + 1) &&
+ len <= (ETHER_STD_PACKET + 4) &&
+ skb->protocol == htons(ETH_P_8021Q))
+ return true;
+
+ return false;
+}
+
/* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */
static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
{
@@ -708,10 +718,15 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status)) {
- if (!xgene_enet_errata_10GE_8(skb, datalen, status)) {
+ if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
+ pdata->false_rflr++;
+ } else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
+ pdata->vlan_rjbr++;
+ } else {
dev_kfree_skb_any(skb);
xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
- xgene_enet_parse_error(rx_ring, pdata, status);
+ xgene_enet_parse_error(rx_ring, status);
+ rx_ring->rx_dropped++;
goto out;
}
}
@@ -1466,10 +1481,9 @@ err:
static void xgene_enet_get_stats64(
struct net_device *ndev,
- struct rtnl_link_stats64 *storage)
+ struct rtnl_link_stats64 *stats)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct rtnl_link_stats64 *stats = &pdata->stats;
struct xgene_enet_desc_ring *ring;
int i;
@@ -1478,6 +1492,8 @@ static void xgene_enet_get_stats64(
if (ring) {
stats->tx_packets += ring->tx_packets;
stats->tx_bytes += ring->tx_bytes;
+ stats->tx_dropped += ring->tx_dropped;
+ stats->tx_errors += ring->tx_errors;
}
}
@@ -1486,14 +1502,18 @@ static void xgene_enet_get_stats64(
if (ring) {
stats->rx_packets += ring->rx_packets;
stats->rx_bytes += ring->rx_bytes;
- stats->rx_errors += ring->rx_length_errors +
+ stats->rx_dropped += ring->rx_dropped;
+ stats->rx_errors += ring->rx_errors +
+ ring->rx_length_errors +
ring->rx_crc_errors +
ring->rx_frame_errors +
ring->rx_fifo_errors;
- stats->rx_dropped += ring->rx_dropped;
+ stats->rx_length_errors += ring->rx_length_errors;
+ stats->rx_crc_errors += ring->rx_crc_errors;
+ stats->rx_frame_errors += ring->rx_frame_errors;
+ stats->rx_fifo_errors += ring->rx_fifo_errors;
}
}
- memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
@@ -1614,7 +1634,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
struct device *dev = &pdev->dev;
int i, ret, max_irqs;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode))
max_irqs = 1;
else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
max_irqs = 2;
@@ -1740,7 +1760,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
}
- if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
+ if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
dev_err(dev, "Incorrect phy-connection-type specified\n");
@@ -1785,15 +1805,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
+ pdata->mcx_stats_addr =
+ pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
offset = (pdata->enet_id == XGENE_ENET1) ?
BLOCK_ETH_MAC_CSR_OFFSET :
X2_BLOCK_ETH_MAC_CSR_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
+ pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
}
@@ -1881,6 +1904,9 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
switch (pdata->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
pdata->mac_ops = &xgene_gmac_ops;
pdata->port_ops = &xgene_gport_ops;
pdata->rm = RM3;
@@ -2055,6 +2081,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
xgene_enet_setup_ops(pdata);
+ spin_lock_init(&pdata->mac_lock);
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
@@ -2076,7 +2103,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
INIT_DELAYED_WORK(&pdata->link_work, link_state);
} else if (!pdata->mdio_driver) {
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(pdata->phy_mode))
ret = xgene_enet_mdio_config(pdata);
else
INIT_DELAYED_WORK(&pdata->link_work, link_state);
@@ -2085,6 +2112,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err1;
}
+ spin_lock_init(&pdata->stats_lock);
+ ret = xgene_extd_stats_init(pdata);
+ if (ret)
+ goto err2;
+
xgene_enet_napi_add(pdata);
ret = register_netdev(ndev);
if (ret) {
@@ -2102,7 +2134,7 @@ err2:
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
- else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
err1:
xgene_enet_delete_desc_rings(pdata);
@@ -2126,12 +2158,12 @@ static int xgene_enet_remove(struct platform_device *pdev)
if (pdata->mdio_driver)
xgene_enet_phy_disconnect(pdata);
- else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
- pdata->port_ops->shutdown(pdata);
xgene_enet_delete_desc_rings(pdata);
+ pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 0d4be2425ebc..985768596900 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -42,6 +42,7 @@
#define XGENE_DRV_VERSION "v1.0"
#define ETHER_MIN_PACKET 64
+#define ETHER_STD_PACKET 1518
#define XGENE_ENET_STD_MTU 1536
#define XGENE_ENET_MAX_MTU 9600
#define SKB_BUFFER_SIZE (XGENE_ENET_STD_MTU - NET_IP_ALIGN)
@@ -138,6 +139,8 @@ struct xgene_enet_desc_ring {
__le64 *exp_bufs;
u64 tx_packets;
u64 tx_bytes;
+ u64 tx_dropped;
+ u64 tx_errors;
u64 rx_packets;
u64 rx_bytes;
u64 rx_dropped;
@@ -155,6 +158,7 @@ struct xgene_mac_ops {
void (*rx_enable)(struct xgene_enet_pdata *pdata);
void (*tx_disable)(struct xgene_enet_pdata *pdata);
void (*rx_disable)(struct xgene_enet_pdata *pdata);
+ void (*get_drop_cnt)(struct xgene_enet_pdata *pdata, u32 *rx, u32 *tx);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
void (*set_framesize)(struct xgene_enet_pdata *pdata, int framesize);
@@ -212,6 +216,7 @@ struct xgene_enet_pdata {
void __iomem *eth_diag_csr_addr;
void __iomem *mcx_mac_addr;
void __iomem *mcx_mac_csr_addr;
+ void __iomem *mcx_stats_addr;
void __iomem *base_addr;
void __iomem *pcs_addr;
void __iomem *ring_csr_addr;
@@ -219,8 +224,12 @@ struct xgene_enet_pdata {
int phy_mode;
enum xgene_enet_rm rm;
struct xgene_enet_cle cle;
- struct rtnl_link_stats64 stats;
+ u64 *extd_stats;
+ u64 false_rflr;
+ u64 vlan_rjbr;
+ spinlock_t stats_lock; /* statistics lock */
const struct xgene_mac_ops *mac_ops;
+ spinlock_t mac_lock; /* mac lock */
const struct xgene_port_ops *port_ops;
struct xgene_ring_ops *ring_ops;
const struct xgene_cle_ops *cle_ops;
@@ -263,5 +272,6 @@ static inline u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
}
void xgene_enet_set_ethtool_ops(struct net_device *netdev);
+int xgene_extd_stats_init(struct xgene_enet_pdata *pdata);
#endif /* __XGENE_ENET_MAIN_H__ */
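A sketch of what the new xgene_extd_stats_init() hook might back: an array
sized to the hardware statistics set, allocated once at probe time. The
XGENE_EXTD_STATS_LEN name and the devm allocation are assumptions; the real
implementation lives in the ethtool code, which this excerpt does not show:

int xgene_extd_stats_init(struct xgene_enet_pdata *pdata)
{
	pdata->extd_stats = devm_kcalloc(&pdata->pdev->dev,
					 XGENE_EXTD_STATS_LEN, sizeof(u64),
					 GFP_KERNEL);
	if (!pdata->extd_stats)
		return -ENOMEM;

	return 0;
}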
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index a8e063bdee3b..b1a83fdbefb8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -54,41 +54,6 @@ static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
iowrite32(val, addr);
}
-static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
- u32 wr_addr, u32 wr_data)
-{
- int i;
-
- iowrite32(wr_addr, ctl->addr);
- iowrite32(wr_data, ctl->ctl);
- iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);
-
- /* wait for write command to complete */
- for (i = 0; i < 10; i++) {
- if (ioread32(ctl->cmd_done)) {
- iowrite32(0, ctl->cmd);
- return true;
- }
- udelay(1);
- }
-
- return false;
-}
-
-static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
- u32 wr_addr, u32 wr_data)
-{
- struct xgene_indirect_ctl ctl = {
- .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
- .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
- .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
- .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
- };
-
- if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
- netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
-}
-
static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
return ioread32(p->eth_csr_addr + offset);
@@ -104,42 +69,6 @@ static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
return ioread32(p->mcx_mac_csr_addr + offset);
}
-static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
-{
- u32 rd_data;
- int i;
-
- iowrite32(rd_addr, ctl->addr);
- iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);
-
- /* wait for read command to complete */
- for (i = 0; i < 10; i++) {
- if (ioread32(ctl->cmd_done)) {
- rd_data = ioread32(ctl->ctl);
- iowrite32(0, ctl->cmd);
-
- return rd_data;
- }
- udelay(1);
- }
-
- pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);
-
- return 0;
-}
-
-static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
-{
- struct xgene_indirect_ctl ctl = {
- .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
- .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
- .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
- .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
- };
-
- return xgene_enet_rd_indirect(&ctl, rd_addr);
-}
-
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
struct net_device *ndev = p->ndev;
@@ -166,6 +95,24 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
return -ENODEV;
}
+static void xgene_sgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 addr, count;
+
+ addr = (pdata->enet_id != XGENE_ENET1) ?
+ XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR :
+ ICM_ECM_DROP_COUNT_REG0_ADDR + pdata->port_id * OFFSET_4;
+ count = xgene_enet_rd_mcx_csr(pdata, addr);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+ addr = (pdata->enet_id != XGENE_ENET1) ?
+ XG_MCX_ECM_CONFIG0_REG_0_ADDR :
+ ECM_CONFIG0_REG_0_ADDR + pdata->port_id * OFFSET_4;
+ xgene_enet_rd_mcx_csr(pdata, addr);
+}
+
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
u32 val;
@@ -587,26 +534,6 @@ static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
struct device *dev = &p->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < p->rxq_cnt; i++) {
- ring = p->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = p->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- }
- xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < p->txq_cnt; i++) {
- ring = p->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(p->clk))
@@ -671,6 +598,7 @@ const struct xgene_mac_ops xgene_sgmac_ops = {
.tx_enable = xgene_sgmac_tx_enable,
.rx_disable = xgene_sgmac_rx_disable,
.tx_disable = xgene_sgmac_tx_disable,
+ .get_drop_cnt = xgene_sgmac_get_drop_cnt,
.set_speed = xgene_sgmac_set_speed,
.set_mac_addr = xgene_sgmac_set_mac_addr,
.set_framesize = xgene_sgmac_set_frame_size,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 423240c97d39..b7d75d067c7a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -71,21 +71,6 @@ static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
return true;
}
-static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
- u32 wr_addr, u32 wr_data)
-{
- void __iomem *addr, *wr, *cmd, *cmd_done;
-
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
-
- if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
- netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
- wr_addr);
-}
-
static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
u32 wr_addr, u32 wr_data)
{
@@ -148,21 +133,6 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
-static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
- u32 rd_addr, u32 *rd_data)
-{
- void __iomem *addr, *rd, *cmd, *cmd_done;
-
- addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
- rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
- cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
-
- if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
- netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
- rd_addr);
-}
-
static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
@@ -210,6 +180,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
return 0;
}
+static void xgene_xgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
+ u32 *rx, u32 *tx)
+{
+ u32 count;
+
+ xgene_enet_rd_axg_csr(pdata, XGENET_ICM_ECM_DROP_COUNT_REG0, &count);
+ *rx = ICM_DROP_COUNT(count);
+ *tx = ECM_DROP_COUNT(count);
+ /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
+ xgene_enet_rd_axg_csr(pdata, XGENET_ECM_CONFIG0_REG_0, &count);
+}
+
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
@@ -300,7 +282,7 @@ static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
if (enable)
data |= HSTTCTLEN;
@@ -316,7 +298,7 @@ static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
if (enable)
data |= HSTRCTLEN;
@@ -332,7 +314,7 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_xgmac_reset(pdata);
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
data |= HSTPPEN;
data &= ~HSTLENCHK;
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
@@ -379,7 +361,7 @@ static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
}
@@ -387,7 +369,7 @@ static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
}
@@ -395,7 +377,7 @@ static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
}
@@ -403,7 +385,7 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
- xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}
@@ -464,26 +446,6 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- struct xgene_enet_desc_ring *ring;
- u32 pb;
- int i;
-
- pb = 0;
- for (i = 0; i < pdata->rxq_cnt; i++) {
- ring = pdata->rx_ring[i]->buf_pool;
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- ring = pdata->rx_ring[i]->page_pool;
- if (ring)
- pb |= BIT(xgene_enet_get_fpsel(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
-
- pb = 0;
- for (i = 0; i < pdata->txq_cnt; i++) {
- ring = pdata->tx_ring[i];
- pb |= BIT(xgene_enet_ring_bufnum(ring->id));
- }
- xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
if (dev->of_node) {
if (!IS_ERR(pdata->clk))
@@ -567,6 +529,7 @@ const struct xgene_mac_ops xgene_xgmac_ops = {
.set_mac_addr = xgene_xgmac_set_mac_addr,
.set_framesize = xgene_xgmac_set_frame_size,
.set_mss = xgene_xgmac_set_mss,
+ .get_drop_cnt = xgene_xgmac_get_drop_cnt,
.link_state = xgene_enet_link_state,
.enable_tx_pause = xgene_xgmac_enable_tx_pause,
.flowctl_rx = xgene_xgmac_flowctl_rx,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index e644a429ebf4..a3b45517df45 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -23,6 +23,7 @@
#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
+#define BLOCK_AXG_STATS_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
#define BLOCK_PCS_OFFSET 0x3800
@@ -70,6 +71,8 @@
#define XG_RSIF_CONFIG1_REG_ADDR 0x00b8
#define XG_RSIF_PLC_CLE_BUFF_THRESH 0x1
#define RSIF_PLC_CLE_BUFF_THRESH_SET(dst, val) xgene_set_bits(dst, val, 0, 2)
+#define XG_MCX_ECM_CONFIG0_REG_0_ADDR 0x0070
+#define XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR 0x0124
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
#define XG_CFG_BYPASS_ADDR 0x0204
@@ -80,6 +83,8 @@
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
+#define XGENET_ECM_CONFIG0_REG_0 0x0870
+#define XGENET_ICM_ECM_DROP_COUNT_REG0 0x0924
#define XGENET_CSR_ECM_CFG_0_ADDR 0x0880
#define XGENET_CSR_MULTI_DPF0_ADDR 0x0888
#define XGENET_CSR_MULTI_DPF1_ADDR 0x088c
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 3a8a4aa13687..9a0817938eca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -223,7 +223,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
skb->protocol = eth_type_trans(skb, ndev);
if (unlikely(buff->is_cso_err)) {
++self->stats.rx.errors;
- __skb_mark_checksum_bad(skb);
+ skb->ip_summed = CHECKSUM_NONE;
} else {
if (buff->is_ip_cso) {
__skb_incr_checksum_unnecessary(skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a851f95c307a..14c236e5bdb1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10303,7 +10303,7 @@ sp_rtnl_not_reset:
}
if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
&bp->sp_rtnl_state)){
- if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
+ if (netif_carrier_ok(bp->dev)) {
bnx2x_tx_disable(bp);
BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
}
@@ -15351,6 +15351,7 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
bp->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 03f55daecb20..69b6829ef1d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -582,7 +582,8 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
if (!page)
return NULL;
- *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+ *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(dev, *mapping)) {
__free_page(page);
return NULL;
@@ -601,8 +602,9 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
if (!data)
return NULL;
- *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
- bp->rx_buf_use_size, bp->rx_dir);
+ *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, *mapping)) {
kfree(data);
@@ -705,8 +707,9 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return -ENOMEM;
}
- mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+ BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, mapping)) {
__free_page(page);
return -EIO;
@@ -799,7 +802,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+ dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (unlikely(!payload))
payload = eth_get_headlen(data_ptr, len);
@@ -841,8 +845,8 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
}
skb = build_skb(data, 0);
- dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) {
kfree(data);
return NULL;
@@ -909,8 +913,9 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
return NULL;
}
- dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+ PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
skb->data_len += frag_len;
skb->len += frag_len;
@@ -1329,8 +1334,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->mapping = new_mapping;
skb = build_skb(data, 0);
- dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&bp->pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
if (!skb) {
kfree(data);
@@ -1971,9 +1977,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!data)
continue;
- dma_unmap_single(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&pdev->dev,
+ tpa_info->mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
tpa_info->data = NULL;
@@ -1993,13 +2001,15 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (BNXT_RX_PAGE_MODE(bp)) {
mapping -= bp->rx_dma_offset;
- dma_unmap_page(&pdev->dev, mapping,
- PAGE_SIZE, bp->rx_dir);
+ dma_unmap_page_attrs(&pdev->dev, mapping,
+ PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
__free_page(data);
} else {
- dma_unmap_single(&pdev->dev, mapping,
- bp->rx_buf_use_size,
- bp->rx_dir);
+ dma_unmap_single_attrs(&pdev->dev, mapping,
+ bp->rx_buf_use_size,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
kfree(data);
}
}
@@ -2012,8 +2022,10 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
if (!page)
continue;
- dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE,
+ PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
rx_agg_buf->page = NULL;
__clear_bit(j, rxr->rx_agg_bmap);
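The bnxt hunks above convert every RX buffer map/unmap to the _attrs
variants so that DMA_ATTR_WEAK_ORDERING (relaxed ordering) can be
requested. The attribute, size and direction must match on both sides of
the mapping. A minimal sketch of the pairing, with illustrative helper
names:

#include <linux/dma-mapping.h>

static int map_rx_page(struct device *dev, struct page *page,
		       enum dma_data_direction dir, dma_addr_t *mapping)
{
	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping))
		return -EIO;	/* caller still owns and frees the page */

	return 0;
}

static void unmap_rx_page(struct device *dev, dma_addr_t mapping,
			  enum dma_data_direction dir)
{
	/* unmap with the same size, direction and attrs used at map time */
	dma_unmap_page_attrs(dev, mapping, PAGE_SIZE, dir,
			     DMA_ATTR_WEAK_ORDERING);
}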
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 927617cbf6a9..ba012427edd6 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1421,7 +1421,7 @@ static bool fw_type_is_none(void)
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
- int i;
+ int i, refcount;
struct msix_entry *msix_entries;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
@@ -1556,10 +1556,14 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
+ refcount = octeon_deregister_device(oct);
+
if (!fw_type_is_none()) {
- /* Soft reset the octeon device before exiting */
- if (!OCTEON_CN23XX_PF(oct) ||
- (OCTEON_CN23XX_PF(oct) && !oct->octeon_id))
+ /* Soft reset the octeon device before exiting.
+ * Implementation note: here, we reset the device
+ * if it is a CN6XXX OR the last CN23XX device.
+ */
+ if (OCTEON_CN6XXX(oct) || !refcount)
oct->fn_list.soft_reset(oct);
}
@@ -3020,6 +3024,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -3694,6 +3699,9 @@ static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
struct octeon_device *oct = lio->oct_dev;
int retval;
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
if (!retval)
cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
@@ -4511,6 +4519,15 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+ /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
+ * since that is what is required for the reference to be removed
+ * during de-initialization (see 'octeon_destroy_resources').
+ */
+ octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
+ PCI_SLOT(octeon_dev->pci_dev->devfn),
+ PCI_FUNC(octeon_dev->pci_dev->devfn),
+ true);
+
octeon_dev->app_mode = CVM_DRV_INVALID_APP;
if (OCTEON_CN23XX_PF(octeon_dev)) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 34c77821fad9..31d737c22648 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -879,8 +879,6 @@ liquidio_vf_probe(struct pci_dev *pdev,
*/
static void octeon_pci_flr(struct octeon_device *oct)
{
- u16 status;
-
pci_save_state(oct->pci_dev);
pci_cfg_access_lock(oct->pci_dev);
@@ -889,20 +887,7 @@ static void octeon_pci_flr(struct octeon_device *oct)
pci_write_config_word(oct->pci_dev, PCI_COMMAND,
PCI_COMMAND_INTX_DISABLE);
- /* Wait for Transaction Pending bit clean */
- msleep(100);
- pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND) {
- dev_info(&oct->pci_dev->dev, "Function reset incomplete after 100ms, sleeping for 5 seconds\n");
- ssleep(5);
- pcie_capability_read_word(oct->pci_dev, PCI_EXP_DEVSTA,
- &status);
- if (status & PCI_EXP_DEVSTA_TRPND)
- dev_info(&oct->pci_dev->dev, "Function reset still incomplete after 5s, reset anyway\n");
- }
- pcie_capability_set_word(oct->pci_dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
- mdelay(100);
+ pcie_flr(oct->pci_dev);
pci_cfg_access_unlock(oct->pci_dev);
@@ -2100,6 +2085,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
conf.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index e21b477d0159..3b7cc9320deb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -543,7 +543,11 @@ static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
"BASE", "NIC", "UNKNOWN"};
static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
+static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES];
+
static u32 octeon_device_count;
+/* locks device array (i.e. octeon_device[]) */
+static spinlock_t octeon_devices_lock;
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
@@ -561,6 +565,7 @@ void octeon_init_device_list(int conf_type)
memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
for (i = 0; i < MAX_OCTEON_DEVICES; i++)
oct_set_config_info(i, conf_type);
+ spin_lock_init(&octeon_devices_lock);
}
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
@@ -720,23 +725,27 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
u32 oct_idx = 0;
struct octeon_device *oct = NULL;
+ spin_lock(&octeon_devices_lock);
+
for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
if (!octeon_device[oct_idx])
break;
- if (oct_idx == MAX_OCTEON_DEVICES)
- return NULL;
+ if (oct_idx < MAX_OCTEON_DEVICES) {
+ oct = octeon_allocate_device_mem(pci_id, priv_size);
+ if (oct) {
+ octeon_device_count++;
+ octeon_device[oct_idx] = oct;
+ }
+ }
- oct = octeon_allocate_device_mem(pci_id, priv_size);
+ spin_unlock(&octeon_devices_lock);
if (!oct)
return NULL;
spin_lock_init(&oct->pci_win_lock);
spin_lock_init(&oct->mem_access_lock);
- octeon_device_count++;
- octeon_device[oct_idx] = oct;
-
oct->octeon_id = oct_idx;
snprintf(oct->device_name, sizeof(oct->device_name),
"LiquidIO%d", (oct->octeon_id));
@@ -744,6 +753,72 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
return oct;
}
+/** Register a device's bus location at initialization time.
+ * @param oct - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf)
+{
+ int idx, refcount;
+
+ oct->loc.bus = bus;
+ oct->loc.dev = dev;
+ oct->loc.func = func;
+
+ oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
+ atomic_set(oct->adapter_refcount, 0);
+
+ spin_lock(&octeon_devices_lock);
+ for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
+ if (!octeon_device[idx]) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: Internal driver error, missing dev",
+ __func__);
+ spin_unlock(&octeon_devices_lock);
+ atomic_inc(oct->adapter_refcount);
+ return 1; /* here, refcount is guaranteed to be 1 */
+ }
+ /* if another device is at the same bus/dev, use its refcounter */
+ if ((octeon_device[idx]->loc.bus == bus) &&
+ (octeon_device[idx]->loc.dev == dev)) {
+ oct->adapter_refcount =
+ octeon_device[idx]->adapter_refcount;
+ break;
+ }
+ }
+ spin_unlock(&octeon_devices_lock);
+
+ atomic_inc(oct->adapter_refcount);
+ refcount = atomic_read(oct->adapter_refcount);
+
+ dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %d\n", __func__,
+ oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
+
+ return refcount;
+}
+
+/** Deregister a device at de-initialization time.
+ * @param oct - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct)
+{
+ int refcount;
+
+ atomic_dec(oct->adapter_refcount);
+ refcount = atomic_read(oct->adapter_refcount);
+
+ dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %d\n", __func__,
+ oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
+
+ return refcount;
+}
+
int
octeon_allocate_ioq_vector(struct octeon_device *oct)
{
@@ -1354,13 +1429,15 @@ int lio_get_device_id(void *dev)
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
u64 instr_cnt;
+ u32 pkts_pend;
struct octeon_device *oct = NULL;
/* the whole thing needs to be atomic, ideally */
if (droq) {
+ pkts_pend = (u32)atomic_read(&droq->pkts_pending);
spin_lock_bh(&droq->lock);
- writel(droq->pkt_count, droq->pkts_sent_reg);
- droq->pkt_count = 0;
+ writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
+ droq->pkt_count = pkts_pend;
/* this write needs to be flushed before we release the lock */
mmiowb();
spin_unlock_bh(&droq->lock);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 92f67de111aa..c90ed48ae8ab 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -544,6 +544,14 @@ struct octeon_device {
u32 tx_max_coalesced_frames;
bool cores_crashed;
+
+ struct {
+ int bus;
+ int dev;
+ int func;
+ } loc;
+
+ atomic_t *adapter_refcount; /* reference count of adapter */
};
#define OCT_DRV_ONLINE 1
@@ -572,6 +580,23 @@ void octeon_free_device_mem(struct octeon_device *oct);
struct octeon_device *octeon_allocate_device(u32 pci_id,
u32 priv_size);
+/** Register a device's bus location at initialization time.
+ * @param oct - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf);
+
+/** Deregister a device at de-initialization time.
+ * @param oct - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct);
+
/** Initialize the driver's dispatch list which is a mix of a hash table
* and a linked list. This is done at driver load time.
* @param octeon_dev - pointer to the octeon device structure.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5063a12613e5..5c3c8da976f7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -251,7 +251,7 @@ union octeon_instr_64B {
/** The size of each buffer in soft command buffer pool
*/
-#define SOFT_COMMAND_BUFFER_SIZE 1536
+#define SOFT_COMMAND_BUFFER_SIZE 2048
struct octeon_soft_command {
/** Soft command buffer info. */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 5cca73b8880b..57af7df74ced 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -178,7 +178,10 @@ int octeon_mbox_write(struct octeon_device *oct,
break;
}
}
- writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
+ if (ret == OCTEON_MBOX_STATUS_SUCCESS)
+ writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
+ else
+ break;
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index c9376fe075bc..1def22afeff1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -20,16 +20,16 @@
/* Macros for Mail Box Communication */
-#define OCTEON_MBOX_DATA_MAX 32
+#define OCTEON_MBOX_DATA_MAX 32
#define OCTEON_VF_ACTIVE 0x1
#define OCTEON_VF_FLR_REQUEST 0x2
#define OCTEON_PF_CHANGED_VF_MACADDR 0x4
/* Macro for read acknowledgement */
-#define OCTEON_PFVFACK 0xffffffffffffffff
-#define OCTEON_PFVFSIG 0x1122334455667788
-#define OCTEON_PFVFERR 0xDEADDEADDEADDEAD
+#define OCTEON_PFVFACK 0xffffffffffffffffULL
+#define OCTEON_PFVFSIG 0x1122334455667788ULL
+#define OCTEON_PFVFERR 0xDEADDEADDEADDEADULL
#define LIO_MBOX_WRITE_WAIT_CNT 1000
#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
@@ -74,8 +74,8 @@ enum octeon_mbox_state {
OCTEON_MBOX_STATE_REQUEST_RECEIVED = 4,
OCTEON_MBOX_STATE_RESPONSE_PENDING = 8,
OCTEON_MBOX_STATE_RESPONSE_RECEIVING = 16,
- OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 16,
- OCTEON_MBOX_STATE_ERROR = 32
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 32,
+ OCTEON_MBOX_STATE_ERROR = 64
};
struct octeon_mbox {
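The enum fix above matters because the mailbox states are used as distinct
flag bits: with OCTEON_MBOX_STATE_RESPONSE_RECEIVING and
OCTEON_MBOX_STATE_RESPONSE_RECEIVED both equal to 16, a flag test could not
tell the two states apart. A build-time guard of this shape (an
illustration, not part of the driver) would have caught the overlap:

#include <linux/bug.h>

static inline void octeon_mbox_state_check(void)
{
	/* fails to compile if two supposedly distinct states collide */
	BUILD_BUG_ON(OCTEON_MBOX_STATE_RESPONSE_RECEIVING ==
		     OCTEON_MBOX_STATE_RESPONSE_RECEIVED);
}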
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index a2138686c605..2887bcaf6af5 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -755,6 +755,7 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
p->has_rx_tstamp = have_hw_timestamps;
config.rx_filter = HWTSTAMP_FILTER_ALL;
if (p->has_rx_tstamp) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e88c1808e46f..1cf3e2f89fc1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1551,6 +1551,7 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
+int t4_update_port_info(struct port_info *pi);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 0ba7866c8259..e9bab72253bb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -500,7 +500,11 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
} else if (port_type == FW_PORT_TYPE_SFP ||
port_type == FW_PORT_TYPE_QSFP_10G ||
port_type == FW_PORT_TYPE_QSA ||
- port_type == FW_PORT_TYPE_QSFP) {
+ port_type == FW_PORT_TYPE_QSFP ||
+ port_type == FW_PORT_TYPE_CR4_QSFP ||
+ port_type == FW_PORT_TYPE_CR_QSFP ||
+ port_type == FW_PORT_TYPE_CR2_QSFP ||
+ port_type == FW_PORT_TYPE_SFP28) {
if (mod_type == FW_PORT_MOD_TYPE_LR ||
mod_type == FW_PORT_MOD_TYPE_SR ||
mod_type == FW_PORT_MOD_TYPE_ER ||
@@ -511,6 +515,9 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
return PORT_DA;
else
return PORT_OTHER;
+ } else if (port_type == FW_PORT_TYPE_KR4_100G ||
+ port_type == FW_PORT_TYPE_KR_SFP28) {
+ return PORT_NONE;
}
return PORT_OTHER;
@@ -618,7 +625,21 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
case FW_PORT_TYPE_CR_QSFP:
case FW_PORT_TYPE_SFP28:
SET_LMM(FIBRE);
- SET_LMM(25000baseCR_Full);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
+ break;
+
+ case FW_PORT_TYPE_KR_SFP28:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
+ break;
+
+ case FW_PORT_TYPE_CR2_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(50000baseSR2_Full);
break;
case FW_PORT_TYPE_KR4_100G:
@@ -674,13 +695,20 @@ static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
static int get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- const struct port_info *pi = netdev_priv(dev);
+ struct port_info *pi = netdev_priv(dev);
struct ethtool_link_settings *base = &link_ksettings->base;
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
+ /* For now, the Firmware doesn't send up Port State changes
+ * when the Virtual Interface attached to the Port is down,
+ * so if it's down, explicitly fetch the latest state.
+ */
+ if (!netif_running(dev))
+ (void)t4_update_port_info(pi);
+
base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
if (pi->mdio_addr >= 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38a5c6764bb5..2ae54d54aea8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -891,7 +891,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
* The skb's priority is determined via the VLAN Tag Priority Code
* Point field.
*/
- if (cxgb4_dcb_enabled(dev)) {
+ if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
u16 vlan_tci;
int err;
@@ -2245,6 +2245,13 @@ static int cxgb_open(struct net_device *dev)
return err;
}
+ /* It's possible that the basic port information could have
+ * changed since we first read it.
+ */
+ err = t4_update_port_info(pi);
+ if (err < 0)
+ return err;
+
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
@@ -4007,10 +4014,7 @@ static void cfg_queues(struct adapter *adap)
/* Reduce memory usage in kdump environment, disable all offload.
*/
- if (is_kdump_kernel()) {
- adap->params.offload = 0;
- adap->params.crypto = 0;
- } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
+ if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
adap->params.offload = 0;
adap->params.crypto = 0;
}
@@ -4031,7 +4035,7 @@ static void cfg_queues(struct adapter *adap)
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = 8;
+ pi->nqsets = is_kdump_kernel() ? 1 : 8;
qidx += pi->nqsets;
}
#else /* !CONFIG_CHELSIO_T4_DCB */
@@ -4044,6 +4048,9 @@ static void cfg_queues(struct adapter *adap)
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
+ if (is_kdump_kernel())
+ q10g = 1;
+
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index aded42b96f6d..b97ce4a15ae0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7355,11 +7355,41 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
lc->fc = fc;
lc->supported = be16_to_cpu(p->u.info.pcap);
lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
+
t4_os_link_changed(adap, pi->port_id, link_ok);
}
}
/**
+ * t4_update_port_info - retrieve and update port information if changed
+ * @pi: the port_info
+ *
+ * We issue a Get Port Information Command to the Firmware and, if
+ * successful, we check to see if anything is different from what we
+ * last recorded and update things accordingly.
+ */
+int t4_update_port_info(struct port_info *pi)
+{
+ struct fw_port_cmd port_cmd;
+ int ret;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(pi->port_id));
+ port_cmd.action_to_len16 = cpu_to_be32(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(port_cmd));
+ ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
+ &port_cmd, sizeof(port_cmd), &port_cmd);
+ if (ret)
+ return ret;
+
+ t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
+ return 0;
+}
+
+/**
* t4_handle_fw_rpl - process a FW reply message
* @adap: the adapter
* @rpl: start of the FW message
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a323185507ec..9232becc965d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -172,6 +172,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 251a35e9795c..c65c33c03bcb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2572,6 +2572,7 @@ enum fw_port_type {
FW_PORT_TYPE_CR_QSFP,
FW_PORT_TYPE_CR2_QSFP,
FW_PORT_TYPE_SFP28,
+ FW_PORT_TYPE_KR_SFP28,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 4b87beeabce1..6a9c8878aca0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1537,13 +1537,12 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[0]);
- if (rq_work_done < rq_work_to_do) {
+ if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
- napi_complete_done(napi, rq_work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
@@ -1663,13 +1662,12 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
- if (work_done < work_to_do) {
+ if ((work_done < budget) && napi_complete_done(napi, work_done)) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
- napi_complete_done(napi, work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
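Both enic poll routines above now follow the canonical NAPI completion
shape: compare against the NAPI budget rather than the local work target,
and only unmask device interrupts when napi_complete_done() confirms that
NAPI actually completed. A sketch with hypothetical do_rx_work() and
enable_device_irq() helpers standing in for the driver specifics:

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = do_rx_work(napi, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		enable_device_irq(napi);

	return work_done;
}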
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 50566243e6fa..674cf9d13b98 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.1.0.0"
+#define DRV_VER "11.4.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 36e4232ed6b8..c967f45705d9 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -49,6 +49,9 @@
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
+/* FW has detected a UE and is dumping FAT log data */
+#define POST_STAGE_FAT_LOG_START 0x0D00
#define POST_STAGE_ARMFW_UE 0xF000 /* FW has asserted a UE */
/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4eee18ce9be4..319eee36649b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3241,8 +3241,9 @@ void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
- u32 i;
struct device *dev = &adapter->pdev->dev;
+ u16 val;
+ u32 i;
if (be_check_error(adapter, BE_ERROR_HW))
return;
@@ -3280,15 +3281,25 @@ void be_detect_error(struct be_adapter *adapter)
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
- /* On certain platforms BE hardware can indicate spurious UEs.
- * Allow HW to stop working completely in case of a real UE.
- * Hence not setting the hw_error for UE detection.
- */
-
if (ue_lo || ue_hi) {
+ /* On certain platforms BE3 hardware can indicate
+ * spurious UEs. In case of a real UE in the chip,
+ * the POST register correctly reports either a
+ * FAT_LOG_START state (FW is currently dumping
+ * FAT log data) or an ARMFW_UE state. Check for the
+ * above states to ascertain whether the UE is valid.
+ */
+ if (BE3_chip(adapter)) {
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_FAT_LOG_START)
+ != POST_STAGE_FAT_LOG_START &&
+ (val & POST_STAGE_ARMFW_UE)
+ != POST_STAGE_ARMFW_UE)
+ return;
+ }
+
dev_err(dev, "Error detected in the adapter");
- if (skyhawk_chip(adapter))
- be_set_error(adapter, BE_ERROR_UE);
+ be_set_error(adapter, BE_ERROR_UE);
for (i = 0; ue_lo; ue_lo >>= 1, i++) {
if (ue_lo & 1)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 72ab7b6bf20b..9a74c4e2e193 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -46,6 +46,8 @@
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
#include "ibmveth.h"
@@ -808,8 +810,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
- if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
- !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+ if (ret == H_SUCCESS &&
(ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
set_attr, &ret_attr);
@@ -1040,6 +1041,15 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
dma_addr_t dma_addr;
unsigned long mss = 0;
+ /* veth doesn't handle frag_list, so linearize the skb.
+ * When GRO is enabled, SKBs can have a frag_list.
+ */
+ if (adapter->is_active_trunk &&
+ skb_has_frag_list(skb) && __skb_linearize(skb)) {
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+
/*
* veth handles a maximum of 6 segments including the header, so
* we have to linearize the skb if there are more than this.
@@ -1064,9 +1074,6 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags = IBMVETH_BUF_VALID;
- if (skb_is_gso(skb) && adapter->fw_large_send_support)
- desc_flags |= IBMVETH_BUF_LRG_SND;
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned char *buf = skb_transport_header(skb) +
skb->csum_offset;
@@ -1076,6 +1083,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
/* Need to zero out the checksum */
buf[0] = 0;
buf[1] = 0;
+
+ if (skb_is_gso(skb) && adapter->fw_large_send_support)
+ desc_flags |= IBMVETH_BUF_LRG_SND;
}
retry_bounce:
@@ -1128,7 +1138,7 @@ retry_bounce:
descs[i+1].fields.address = dma_addr;
}
- if (skb_is_gso(skb)) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
if (adapter->fw_large_send_support) {
mss = (unsigned long)skb_shinfo(skb)->gso_size;
adapter->tx_large_packets++;
@@ -1232,6 +1242,71 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
}
}
+static void ibmveth_rx_csum_helper(struct sk_buff *skb,
+ struct ibmveth_adapter *adapter)
+{
+ struct iphdr *iph = NULL;
+ struct ipv6hdr *iph6 = NULL;
+ __be16 skb_proto = 0;
+ u16 iphlen = 0;
+ u16 iph_proto = 0;
+ u16 tcphdrlen = 0;
+
+ skb_proto = be16_to_cpu(skb->protocol);
+
+ if (skb_proto == ETH_P_IP) {
+ iph = (struct iphdr *)skb->data;
+
+ /* If the IP checksum is not offloaded and the packet
+ * is a large-send packet, the checksum must be rebuilt.
+ */
+ if (iph->check == 0xffff) {
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph,
+ iph->ihl);
+ }
+
+ iphlen = iph->ihl * 4;
+ iph_proto = iph->protocol;
+ } else if (skb_proto == ETH_P_IPV6) {
+ iph6 = (struct ipv6hdr *)skb->data;
+ iphlen = sizeof(struct ipv6hdr);
+ iph_proto = iph6->nexthdr;
+ }
+
+ /* In an OVS environment, when a flow is not cached, specifically for a
+ * new TCP connection, the first packet's information is passed up
+ * to user space to find a flow. During this process, OVS computes the
+ * checksum on the first packet when the CHECKSUM_PARTIAL flag is set.
+ *
+ * Given that we zeroed out the TCP checksum field in the transmit path
+ * (refer to the ibmveth_start_xmit routine) as we set the "no checksum"
+ * bit, the OVS-computed checksum will be incorrect without the TCP
+ * pseudo checksum in the packet. This leads to OVS dropping the packet,
+ * and hence TCP retransmissions are seen.
+ *
+ * So, re-compute the TCP pseudo header checksum.
+ */
+ if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
+ struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
+
+ tcphdrlen = skb->len - iphlen;
+
+ /* Recompute TCP pseudo header checksum */
+ if (skb_proto == ETH_P_IP)
+ tcph->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, tcphdrlen, iph_proto, 0);
+ else if (skb_proto == ETH_P_IPV6)
+ tcph->check = ~csum_ipv6_magic(&iph6->saddr,
+ &iph6->daddr, tcphdrlen, iph_proto, 0);
+
+ /* Setup SKB fields for checksum offload */
+ skb_partial_csum_set(skb, iphlen,
+ offsetof(struct tcphdr, check));
+ skb_reset_network_header(skb);
+ }
+}
+
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
struct ibmveth_adapter *adapter =
@@ -1239,7 +1314,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
struct net_device *netdev = adapter->netdev;
int frames_processed = 0;
unsigned long lpar_rc;
- struct iphdr *iph;
u16 mss = 0;
restart_poll:
@@ -1297,17 +1371,7 @@ restart_poll:
if (csum_good) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
- iph = (struct iphdr *)skb->data;
-
- /* If the IP checksum is not offloaded and if the packet
- * is large send, the checksum must be rebuilt.
- */
- if (iph->check == 0xffff) {
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- }
- }
+ ibmveth_rx_csum_helper(skb, adapter);
}
if (length > netdev->mtu + ETH_HLEN) {
@@ -1626,6 +1690,13 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->hw_features |= NETIF_F_TSO;
}
+ adapter->is_active_trunk = false;
+ if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) {
+ adapter->is_active_trunk = true;
+ netdev->hw_features |= NETIF_F_FRAGLIST;
+ netdev->features |= NETIF_F_FRAGLIST;
+ }
+
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index ed8780cca982..01c587fc02c7 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -156,6 +156,7 @@ struct ibmveth_adapter {
int pool_config;
int rx_csum;
int large_send;
+ bool is_active_trunk;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f2d329dba99..8dcf58088178 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -163,6 +163,16 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
return rc;
}
+static void reset_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb)
+{
+ memset(ltb->buff, 0, ltb->size);
+
+ init_completion(&adapter->fw_done);
+ send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+ wait_for_completion(&adapter->fw_done);
+}
+
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
@@ -200,6 +210,15 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
+static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ i++)
+ adapter->rx_pool[i].active = 0;
+}
+
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
@@ -217,6 +236,9 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
int index;
int i;
+ if (!pool->active)
+ return;
+
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
off_rxadd_subcrqs));
@@ -287,6 +309,15 @@ failure:
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
atomic_add(buffers_added, &pool->available);
+
+ if (lpar_rc == H_CLOSED) {
+ /* Disable buffer pool replenishment and report carrier off if
+ * the queue is closed. Firmware guarantees that a signal will
+ * be sent to the driver, triggering a reset.
+ */
+ deactivate_rx_pools(adapter);
+ netif_carrier_off(adapter->netdev);
+ }
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
@@ -331,6 +362,32 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
return 0;
}
+static int reset_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rx_pool *rx_pool;
+ int rx_scrqs;
+ int i, j;
+
+ rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ for (i = 0; i < rx_scrqs; i++) {
+ rx_pool = &adapter->rx_pool[i];
+
+ reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+
+ for (j = 0; j < rx_pool->size; j++)
+ rx_pool->free_map[j] = j;
+
+ memset(rx_pool->rx_buff, 0,
+ rx_pool->size * sizeof(struct ibmvnic_rx_buff));
+
+ atomic_set(&rx_pool->available, 0);
+ rx_pool->next_alloc = 0;
+ rx_pool->next_free = 0;
+ }
+
+ return 0;
+}
+
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
@@ -432,6 +489,32 @@ static int init_rx_pools(struct net_device *netdev)
return 0;
}
+static int reset_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_tx_pool *tx_pool;
+ int tx_scrqs;
+ int i, j;
+
+ tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ for (i = 0; i < tx_scrqs; i++) {
+ tx_pool = &adapter->tx_pool[i];
+
+ reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+
+ memset(tx_pool->tx_buff, 0,
+ adapter->req_tx_entries_per_subcrq *
+ sizeof(struct ibmvnic_tx_buff));
+
+ for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
+ tx_pool->free_map[j] = j;
+
+ tx_pool->consumer_index = 0;
+ tx_pool->producer_index = 0;
+ }
+
+ return 0;
+}
+
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
@@ -518,6 +601,32 @@ static void release_error_buffers(struct ibmvnic_adapter *adapter)
spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}
+static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->napi_enabled)
+ return;
+
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_enable(&adapter->napi[i]);
+
+ adapter->napi_enabled = true;
+}
+
+static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->napi_enabled)
+ return;
+
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_disable(&adapter->napi[i]);
+
+ adapter->napi_enabled = false;
+}
+
static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -674,9 +783,7 @@ static int __ibmvnic_open(struct net_device *netdev)
adapter->state = VNIC_OPENING;
replenish_pools(adapter);
-
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_enable(&adapter->napi[i]);
+ ibmvnic_napi_enable(adapter);
/* We're ready to receive frames, enable the sub-crq interrupts and
* set the logical link state to up
@@ -779,13 +886,7 @@ static int __ibmvnic_close(struct net_device *netdev)
adapter->state = VNIC_CLOSING;
netif_tx_stop_all_queues(netdev);
-
- if (adapter->napi) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
- }
-
- clean_tx_pools(adapter);
+ ibmvnic_napi_disable(adapter);
if (adapter->tx_scrq) {
for (i = 0; i < adapter->req_tx_queues; i++)
@@ -814,6 +915,7 @@ static int __ibmvnic_close(struct net_device *netdev)
}
}
+ clean_tx_pools(adapter);
adapter->state = VNIC_CLOSED;
return rc;
}
@@ -1092,8 +1194,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED)
- netif_stop_subqueue(netdev, queue_num);
+ if (lpar_rc == H_CLOSED) {
+ /* Disable TX and report carrier off if the queue is closed.
+ * Firmware guarantees that a signal will be sent to the
+ * driver, triggering a reset or some other action.
+ */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
tx_send_failed++;
tx_dropped++;
@@ -1206,37 +1314,39 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
- /* remove the closed state so when we call open it appears
- * we are coming from the probed state.
- */
- adapter->state = VNIC_PROBED;
+ if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
+ /* remove the closed state so when we call open it appears
+ * we are coming from the probed state.
+ */
+ adapter->state = VNIC_PROBED;
- release_resources(adapter);
- release_sub_crqs(adapter);
- release_crq_queue(adapter);
+ rc = ibmvnic_init(adapter);
+ if (rc)
+ return 0;
- rc = ibmvnic_init(adapter);
- if (rc)
- return 0;
+ /* If the adapter was in PROBE state prior to the reset,
+ * exit here.
+ */
+ if (reset_state == VNIC_PROBED)
+ return 0;
- /* If the adapter was in PROBE state prior to the reset, exit here. */
- if (reset_state == VNIC_PROBED)
- return 0;
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+ adapter->state = VNIC_PROBED;
+ return 0;
+ }
- rc = ibmvnic_login(netdev);
- if (rc) {
- adapter->state = VNIC_PROBED;
- return 0;
- }
+ rc = reset_tx_pools(adapter);
+ if (rc)
+ return rc;
- rtnl_lock();
- rc = init_resources(adapter);
- rtnl_unlock();
- if (rc)
- return rc;
+ rc = reset_rx_pools(adapter);
+ if (rc)
+ return rc;
- if (reset_state == VNIC_CLOSED)
- return 0;
+ if (reset_state == VNIC_CLOSED)
+ return 0;
+ }
rc = __ibmvnic_open(netdev);
if (rc) {
@@ -1254,6 +1364,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
+ netdev_notify_peers(netdev);
return 0;
}
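netdev_notify_peers() emits gratuitous ARP/ND so that peers and switches relearn the device once the reset completes. A minimal sketch of the general pattern (the helper and the netif_carrier_on() pairing are illustrative; this patch only adds the notify call):

static void my_reset_complete(struct net_device *netdev)
{
	/* my_reset_complete is hypothetical; shown for context only */
	netif_carrier_on(netdev);	/* link is usable again */
	netdev_notify_peers(netdev);	/* gratuitous ARP/ND refresh */
}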
@@ -1313,6 +1424,7 @@ static void __ibmvnic_reset(struct work_struct *work)
if (rc) {
free_all_rwi(adapter);
+ mutex_unlock(&adapter->reset_lock);
return;
}
@@ -1383,6 +1495,10 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int scrq_num = (int)(napi - adapter->napi);
int frames_processed = 0;
+
+ if (adapter->resetting)
+ return 0;
+
restart_poll:
while (frames_processed < budget) {
struct sk_buff *skb;
@@ -1441,7 +1557,9 @@ restart_poll:
netdev->stats.rx_bytes += length;
frames_processed++;
}
- replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
+
+ if (adapter->state != VNIC_CLOSING)
+ replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1608,6 +1726,45 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
/* Routines for managing CRQs/sCRQs */
+static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ int rc;
+
+ if (scrq->irq) {
+ free_irq(scrq->irq, scrq);
+ irq_dispose_mapping(scrq->irq);
+ scrq->irq = 0;
+ }
+
+ memset(scrq->msgs, 0, 2 * PAGE_SIZE);
+ scrq->cur = 0;
+
+ rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
+ 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
+ return rc;
+}
+
+static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
+{
+ int i, rc;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+ if (rc)
+ return rc;
+ }
+
+ rc = init_sub_crq_irqs(adapter);
+ return rc;
+}
+
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -2742,6 +2899,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ else
+ ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
@@ -3147,6 +3306,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
switch (gen_crq->cmd) {
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
+ adapter->from_passive_init = true;
+ complete(&adapter->init_done);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -3455,21 +3616,38 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
unsigned long timeout = msecs_to_jiffies(30000);
int rc;
- rc = init_crq_queue(adapter);
+ if (adapter->resetting) {
+ rc = ibmvnic_reset_crq(adapter);
+ if (!rc)
+ rc = vio_enable_interrupts(adapter->vdev);
+ } else {
+ rc = init_crq_queue(adapter);
+ }
+
if (rc) {
dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
return rc;
}
+ adapter->from_passive_init = false;
+
init_completion(&adapter->init_done);
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
- release_crq_queue(adapter);
return -1;
}
- rc = init_sub_crqs(adapter);
+ if (adapter->from_passive_init) {
+ adapter->state = VNIC_OPEN;
+ adapter->from_passive_init = false;
+ return -1;
+ }
+
+ if (adapter->resetting)
+ rc = reset_sub_crq_queues(adapter);
+ else
+ rc = init_sub_crqs(adapter);
if (rc) {
dev_err(dev, "Initialization of sub crqs failed\n");
release_crq_queue(adapter);
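The reworked ibmvnic_init() above is built around a completion handshake: the CRQ interrupt path signals init_done (including the new passive-init case), while the caller blocks on it with a timeout. A generic sketch of that handshake, with illustrative my_* names:

static int my_wait_for_partner_init(struct my_adapter *adapter)
{
	unsigned long timeout = msecs_to_jiffies(30000);

	init_completion(&adapter->init_done);
	my_send_init_request(adapter);	/* hypothetical; the peer calls complete() */

	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return -ETIMEDOUT;
	return 0;
}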
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4702b48cfa44..7e2300e64a47 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -925,6 +925,7 @@ enum vnic_state {VNIC_PROBING = 1,
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
VNIC_RESET_FATAL,
+ VNIC_RESET_NON_FATAL,
VNIC_RESET_TIMEOUT};
struct ibmvnic_rwi {
@@ -1031,4 +1032,5 @@ struct ibmvnic_adapter {
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;
+ bool napi_enabled, from_passive_init;
};
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b3679728caac..6ed3bc419b96 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3680,6 +3680,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
* Delay Request messages but not both so fall-through to
* time stamp all packets.
*/
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
is_l2 = true;
is_l4 = true;
@@ -5867,10 +5868,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
adapter->tx_hwtstamp_skb = skb_get(skb);
adapter->tx_hwtstamp_start = jiffies;
schedule_work(&adapter->tx_hwtstamp_work);
- } else {
- skb_tx_timestamp(skb);
}
+ skb_tx_timestamp(skb);
+
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
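This and the following timestamping hunks wire the new HWTSTAMP_FILTER_NTP_ALL value into each driver's SIOCSHWTSTAMP handling: hardware that cannot match NTP traffic specifically falls back to timestamping everything and reports HWTSTAMP_FILTER_ALL back to user space, while hardware that cannot timestamp everything rejects the request. A sketch of the shape of such a handler (my_hwtstamp_set_rx_filter is hypothetical):

static int my_hwtstamp_set_rx_filter(struct hwtstamp_config *config)
{
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		/* no NTP-specific matching: timestamp all packets */
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

The e1000_xmit_frame hunk in the same file, separately, moves skb_tx_timestamp() out of the else branch, which appears intended to keep software transmit timestamps working even when a hardware timestamp has been requested.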
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 18c1cc08da97..0efff18ee336 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -562,6 +562,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
}
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
default:
return -ERANGE;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7a3fd4d74592..d333d6d80194 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -941,6 +941,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* 82576 cannot timestamp all packets, which it needs to do to
* support both V1 Sync and Delay_Req messages
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef0635e0918c..d44c728fdc0b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -883,6 +883,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* The X550 controller is capable of timestamping all packets,
* which allows it to accept any filter.
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f580b49e6b67..0e5083a48937 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -691,17 +691,6 @@ jme_enable_tx_engine(struct jme_adapter *jme)
}
static inline void
-jme_restart_tx_engine(struct jme_adapter *jme)
-{
- /*
- * Restart TX Engine
- */
- jwrite32(jme, JME_TXCS, jme->reg_txcs |
- TXCS_SELECT_QUEUE0 |
- TXCS_ENABLE);
-}
-
-static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
int i;
@@ -2382,37 +2371,6 @@ jme_tx_timeout(struct net_device *netdev)
jme_reset_link(jme);
}
-static inline void jme_pause_rx(struct jme_adapter *jme)
-{
- atomic_dec(&jme->link_changing);
-
- jme_set_rx_pcc(jme, PCC_OFF);
- if (test_bit(JME_FLAG_POLL, &jme->flags)) {
- JME_NAPI_DISABLE(jme);
- } else {
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
- }
-}
-
-static inline void jme_resume_rx(struct jme_adapter *jme)
-{
- struct dynpcc_info *dpi = &(jme->dpi);
-
- if (test_bit(JME_FLAG_POLL, &jme->flags)) {
- JME_NAPI_ENABLE(jme);
- } else {
- tasklet_enable(&jme->rxclean_task);
- tasklet_enable(&jme->rxempty_task);
- }
- dpi->cur = PCC_P1;
- dpi->attempt = PCC_P1;
- dpi->cnt = 0;
- jme_set_rx_pcc(jme, PCC_P1);
-
- atomic_inc(&jme->link_changing);
-}
-
static void
jme_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index d54701047401..84a200764111 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -19,5 +19,6 @@ if NET_VENDOR_MELLANOX
source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxfw/Kconfig"
endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 2e2a5ec509ac..016aa263bc04 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/
obj-$(CONFIG_MLXSW_CORE) += mlxsw/
+obj-$(CONFIG_MLXFW) += mlxfw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 94fab20ef146..82436742ad75 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2375,6 +2375,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 27251a78075c..cf1ef48bfd8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -11,6 +11,16 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
+config MLX5_FPGA
+ bool "Mellanox Technologies Innova support"
+ depends on MLX5_CORE
+ ---help---
+ Build support for the Innova family of network cards by Mellanox
+ Technologies. Innova network cards consist of a ConnectX chip
+ and an FPGA chip on one board. If you select this option, the
+ mlx5_core driver will include the Innova FPGA core and allow building
+ sandbox-specific client drivers.
+
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9e644615f07a..12556c03eec4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,10 +1,13 @@
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
+subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
fs_counters.o rl.o lag.o dev.o
+mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o
+
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index e94a9532e218..de40b6cfee95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -405,7 +405,7 @@ static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *out;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
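This begins a long mechanical conversion across mlx5: the driver-private mlx5_vzalloc(size) becomes the generic kvzalloc(size, GFP_KERNEL), which tries kmalloc first, falls back to vmalloc for large sizes, and returns zeroed memory either way (which is also why a later eswitch.c hunk can drop a now-redundant memset()). kvfree() releases both cases. A minimal sketch of the pattern:

static int my_query_example(size_t outlen)
{
	/* my_query_example is hypothetical; the calls are real APIs */
	u32 *out = kvzalloc(outlen, GFP_KERNEL);	/* zeroed memory */

	if (!out)
		return -ENOMEM;
	/* ... fill in and consume out ... */
	kvfree(out);	/* frees kmalloc- and vmalloc-backed memory alike */
	return 0;
}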
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index c8a005326e30..f4017c06ddd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -180,9 +180,8 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
@@ -237,7 +236,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kvfree(ft->g);
kvfree(in);
@@ -481,9 +480,8 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index e706a87fc8b2..e29494464cae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -128,6 +128,7 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
netdev_warn(dev, "Disabling cqe compression");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index f1f17f7a3cd0..46e56ec4c26f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -65,7 +65,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
u32 *in;
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -147,7 +147,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8209affa75c3..e9e33fd68279 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -39,7 +39,7 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
struct mlx5_core_dev *mdev = priv->mdev;
strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+ strlcpy(drvinfo->version, DRIVER_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
@@ -1048,7 +1048,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
(hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 53ed58320a24..7acc4fba7ece 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -218,11 +218,9 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return -ENOMEM;
- }
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
mlx5e_vport_context_update_vlans(priv);
@@ -660,11 +658,9 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return ERR_PTR(-ENOMEM);
- }
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -742,7 +738,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
sizeof(*ft->g), GFP_KERNEL);
if (!ft->g)
return -ENOMEM;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
return -ENOMEM;
@@ -852,11 +848,9 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
u8 *mc_dmac;
u8 *mv_dmac;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return -ENOMEM;
- }
mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
@@ -916,7 +910,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g)
return -ENOMEM;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
return -ENOMEM;
@@ -1071,7 +1065,7 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 85bf4a389295..bdd82c9b3992 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -296,7 +296,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
err = set_flow_attrs(spec->match_criteria, spec->match_value,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 41cd22a223dc..cdff04b2aea1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -252,9 +252,9 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
void *out;
u32 *in;
- in = mlx5_vzalloc(sz);
+ in = kvzalloc(sz, GFP_KERNEL);
if (!in)
- goto free_out;
+ return;
MLX5_SET(ppcnt_reg, in, local_port, 1);
@@ -288,7 +288,6 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
MLX5_REG_PPCNT, 0, 0);
}
-free_out:
kvfree(in);
}
@@ -314,7 +313,7 @@ static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
return;
- in = mlx5_vzalloc(sz);
+ in = kvzalloc(sz, GFP_KERNEL);
if (!in)
return;
@@ -503,7 +502,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
if (!MLX5E_VALID_NUM_MTTS(npages))
return -EINVAL;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -711,7 +710,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
sizeof(u64) * rq->wq_ctrl.buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -748,7 +747,7 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -776,7 +775,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -805,7 +804,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1134,7 +1133,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
sizeof(u64) * csp->wq_ctrl->buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1182,7 +1181,7 @@ static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1496,7 +1495,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2091,7 +2090,7 @@ mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
int i;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2210,7 +2209,7 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2433,7 +2432,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
int ix;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2850,7 +2849,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -2889,7 +2888,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
int ix;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ec63158ab643..d2f90ba2dbc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1436,8 +1436,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
- neigh_release(n);
- return -EAGAIN;
+ err = -EAGAIN;
+ goto out;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
@@ -1542,8 +1542,8 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
- neigh_release(n);
- return -EAGAIN;
+ err = -EAGAIN;
+ goto out;
}
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
@@ -1778,7 +1778,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
}
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
- parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
+ parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
if (!parse_attr || !flow) {
err = -ENOMEM;
goto err_free;
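The two encap-header hunks above convert early returns into goto-based unwinding so the neighbour reference is dropped exactly once, at the shared out label. A sketch of that pattern with hypothetical names:

static int my_create_encap(struct neighbour *n)
{
	int err = 0;

	if (!my_nud_valid(n)) {		/* my_nud_valid is hypothetical */
		neigh_event_send(n, NULL);	/* kick neighbour resolution */
		err = -EAGAIN;
		goto out;
	}
	/* ... allocate and program the encap entry ... */
out:
	neigh_release(n);	/* single release on every path */
	return err;
}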
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 33eae5ad2fb0..0ed8e90ba54f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+#include "fpga/core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
@@ -156,6 +157,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_PAGE_FAULT";
case MLX5_EVENT_TYPE_PPS_EVENT:
return "MLX5_EVENT_TYPE_PPS_EVENT";
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ return "MLX5_EVENT_TYPE_FPGA_ERROR";
default:
return "Unrecognized event";
}
@@ -476,6 +479,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
if (dev->event)
dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
break;
+
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
+ break;
+
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -548,7 +556,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_buf;
@@ -693,6 +701,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pps))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
+ if (MLX5_CAP_GEN(dev, fpga))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2e34d95ea776..81dfcd90b1f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -248,11 +248,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
if (rx_rule)
match_header |= MLX5_MATCH_MISC_PARAMETERS;
- spec = mlx5_vzalloc(sizeof(*spec));
- if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
return NULL;
- }
+
dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -350,10 +349,9 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
- memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
@@ -961,7 +959,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1078,7 +1076,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
return -EOPNOTSUPP;
}
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -1241,11 +1239,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
- vport->vport, err);
goto out;
}
@@ -1322,11 +1318,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
- vport->vport, err);
goto out;
}
@@ -2158,7 +2152,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f991f669047e..3795943ef2d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -311,9 +311,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
struct mlx5_flow_spec *spec;
void *misc;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
@@ -401,9 +400,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
struct mlx5_flow_spec *spec;
int err = 0;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
err = -ENOMEM;
goto out;
}
@@ -488,7 +486,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
u32 *flow_group_in;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -631,7 +629,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int err = 0;
int nvports = priv->sriov.num_vfs + 2;
- flow_group_in = mlx5_vzalloc(inlen);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -675,9 +673,8 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
struct mlx5_flow_spec *spec;
void *misc;
- spec = mlx5_vzalloc(sizeof(*spec));
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
- esw_warn(esw->dev, "Failed to alloc match parameters\n");
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
new file mode 100644
index 000000000000..99cba644b4fc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/cmd.h>
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "fpga/cmd.h"
+
+int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), caps,
+ MLX5_ST_SZ_BYTES(fpga_cap),
+ MLX5_REG_FPGA_CAP, 0, 0);
+}
+
+int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query)
+{
+ u32 in[MLX5_ST_SZ_DW(fpga_ctrl)] = {0};
+ u32 out[MLX5_ST_SZ_DW(fpga_ctrl)];
+ int err;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_FPGA_CTRL, 0, false);
+ if (err)
+ return err;
+
+ query->status = MLX5_GET(fpga_ctrl, out, status);
+ query->admin_image = MLX5_GET(fpga_ctrl, out, flash_select_admin);
+ query->oper_image = MLX5_GET(fpga_ctrl, out, flash_select_oper);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
new file mode 100644
index 000000000000..a74396a61bc3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_FPGA_H__
+#define __MLX5_FPGA_H__
+
+#include <linux/mlx5/driver.h>
+
+enum mlx5_fpga_image {
+ MLX5_FPGA_IMAGE_USER = 0,
+ MLX5_FPGA_IMAGE_FACTORY,
+};
+
+enum mlx5_fpga_status {
+ MLX5_FPGA_STATUS_SUCCESS = 0,
+ MLX5_FPGA_STATUS_FAILURE = 1,
+ MLX5_FPGA_STATUS_IN_PROGRESS = 2,
+ MLX5_FPGA_STATUS_NONE = 0xFFFF,
+};
+
+struct mlx5_fpga_query {
+ enum mlx5_fpga_image admin_image;
+ enum mlx5_fpga_image oper_image;
+ enum mlx5_fpga_status status;
+};
+
+int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
+
+#endif /* __MLX5_FPGA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
new file mode 100644
index 000000000000..d88b332e9669
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "fpga/core.h"
+
+static const char *const mlx5_fpga_error_strings[] = {
+ "Null Syndrome",
+ "Corrupted DDR",
+ "Flash Timeout",
+ "Internal Link Error",
+ "Watchdog HW Failure",
+ "I2C Failure",
+ "Image Changed",
+ "Temperature Critical",
+};
+
+static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void)
+{
+ struct mlx5_fpga_device *fdev = NULL;
+
+ fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
+ if (!fdev)
+ return NULL;
+
+ spin_lock_init(&fdev->state_lock);
+ fdev->state = MLX5_FPGA_STATUS_NONE;
+ return fdev;
+}
+
+static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image)
+{
+ switch (image) {
+ case MLX5_FPGA_IMAGE_USER:
+ return "user";
+ case MLX5_FPGA_IMAGE_FACTORY:
+ return "factory";
+ default:
+ return "unknown";
+ }
+}
+
+static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
+{
+ struct mlx5_fpga_query query;
+ int err;
+
+ err = mlx5_fpga_query(fdev->mdev, &query);
+ if (err) {
+ mlx5_fpga_err(fdev, "Failed to query status: %d\n", err);
+ return err;
+ }
+
+ fdev->last_admin_image = query.admin_image;
+ fdev->last_oper_image = query.oper_image;
+
+ mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n",
+ query.status, query.admin_image, query.oper_image);
+
+ if (query.status != MLX5_FPGA_STATUS_SUCCESS) {
+ mlx5_fpga_err(fdev, "%s image failed to load; status %u\n",
+ mlx5_fpga_image_name(fdev->last_oper_image),
+ query.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ unsigned long flags;
+ int err;
+
+ if (!fdev)
+ return 0;
+
+ err = mlx5_fpga_device_load_check(fdev);
+ if (err)
+ goto out;
+
+ err = mlx5_fpga_caps(fdev->mdev,
+ fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
+ if (err)
+ goto out;
+
+ mlx5_fpga_info(fdev, "device %u; %s image, version %u\n",
+ MLX5_CAP_FPGA(fdev->mdev, fpga_device),
+ mlx5_fpga_image_name(fdev->last_oper_image),
+ MLX5_CAP_FPGA(fdev->mdev, image_version));
+
+out:
+ spin_lock_irqsave(&fdev->state_lock, flags);
+ fdev->state = err ? MLX5_FPGA_STATUS_FAILURE : MLX5_FPGA_STATUS_SUCCESS;
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+ return err;
+}
+
+int mlx5_fpga_device_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_fpga_device *fdev = NULL;
+
+ if (!MLX5_CAP_GEN(mdev, fpga)) {
+ mlx5_core_dbg(mdev, "FPGA capability not present\n");
+ return 0;
+ }
+
+ mlx5_core_dbg(mdev, "Initializing FPGA\n");
+
+ fdev = mlx5_fpga_device_alloc();
+ if (!fdev)
+ return -ENOMEM;
+
+ fdev->mdev = mdev;
+ mdev->fpga = fdev;
+
+ return 0;
+}
+
+void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev)
+{
+ kfree(mdev->fpga);
+ mdev->fpga = NULL;
+}
+
+static const char *mlx5_fpga_syndrome_to_string(u8 syndrome)
+{
+ if (syndrome < ARRAY_SIZE(mlx5_fpga_error_strings))
+ return mlx5_fpga_error_strings[syndrome];
+ return "Unknown";
+}
+
+void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
+{
+ struct mlx5_fpga_device *fdev = mdev->fpga;
+ const char *event_name;
+ bool teardown = false;
+ unsigned long flags;
+ u8 syndrome;
+
+ if (event != MLX5_EVENT_TYPE_FPGA_ERROR) {
+ mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
+ event);
+ return;
+ }
+
+ syndrome = MLX5_GET(fpga_error_event, data, syndrome);
+ event_name = mlx5_fpga_syndrome_to_string(syndrome);
+
+ spin_lock_irqsave(&fdev->state_lock, flags);
+ switch (fdev->state) {
+ case MLX5_FPGA_STATUS_SUCCESS:
+ mlx5_fpga_warn(fdev, "Error %u: %s\n", syndrome, event_name);
+ teardown = true;
+ break;
+ default:
+ mlx5_fpga_warn_ratelimited(fdev, "Unexpected error event %u: %s\n",
+ syndrome, event_name);
+ }
+ spin_unlock_irqrestore(&fdev->state_lock, flags);
+ /* We tear down the card's interfaces and functionality because
+ * the FPGA bump-on-the-wire is misbehaving and we lose the ability
+ * to communicate with the network. The user may still be able to
+ * recover by re-programming or debugging the FPGA.
+ */
+ if (teardown)
+ mlx5_trigger_health_work(fdev->mdev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
new file mode 100644
index 000000000000..c55044d66778
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_FPGA_CORE_H__
+#define __MLX5_FPGA_CORE_H__
+
+#ifdef CONFIG_MLX5_FPGA
+
+#include "fpga/cmd.h"
+
+/* Represents an Innova device */
+struct mlx5_fpga_device {
+ struct mlx5_core_dev *mdev;
+ spinlock_t state_lock; /* Protects state transitions */
+ enum mlx5_fpga_status state;
+ enum mlx5_fpga_image last_admin_image;
+ enum mlx5_fpga_image last_oper_image;
+};
+
+#define mlx5_fpga_dbg(__adev, format, ...) \
+ dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_err(__adev, format, ...) \
+ dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_warn(__adev, format, ...) \
+ dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
+
+#define mlx5_fpga_warn_ratelimited(__adev, format, ...) \
+ dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \
+ format, __func__, __LINE__, ##__VA_ARGS__)
+
+#define mlx5_fpga_notice(__adev, format, ...) \
+ dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+
+#define mlx5_fpga_info(__adev, format, ...) \
+ dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+
+int mlx5_fpga_device_init(struct mlx5_core_dev *mdev);
+void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev);
+int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
+void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);
+
+#else
+
+static inline int mlx5_fpga_device_init(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_fpga_device_cleanup(struct mlx5_core_dev *mdev)
+{
+}
+
+static inline int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
+ void *data)
+{
+}
+
+#endif
+
+#endif /* __MLX5_FPGA_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index fcec7bedd3cd..abb44a268563 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -232,11 +232,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
u32 *in;
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0e487e8ca634..6c636c21d24f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -376,11 +376,9 @@ static void del_rule(struct fs_node *node)
int err;
bool update_fte = false;
- match_value = mlx5_vzalloc(match_len);
- if (!match_value) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
+ match_value = kvzalloc(match_len, GFP_KERNEL);
+ if (!match_value)
return;
- }
fs_get_obj(rule, node);
fs_get_obj(fte, rule->node.parent);
@@ -1157,7 +1155,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return ERR_PTR(-ENOMEM);
@@ -1777,7 +1775,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering
struct mlx5_flow_namespace *ns;
/* Create the root namespace */
- root_ns = mlx5_vzalloc(sizeof(*root_ns));
+ root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
if (!root_ns)
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 44f59b1d6f0f..80b23333de7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -185,6 +185,7 @@ static void health_care(struct work_struct *work)
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
+ unsigned long flags;
health = container_of(work, struct mlx5_core_health, work);
priv = container_of(health, struct mlx5_priv, health);
@@ -192,13 +193,13 @@ static void health_care(struct work_struct *work)
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
- spin_lock(&health->wq_lock);
+ spin_lock_irqsave(&health->wq_lock, flags);
if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
schedule_delayed_work(&health->recover_work, recover_delay);
else
dev_err(&dev->pdev->dev,
"new health works are not permitted at this stage\n");
- spin_unlock(&health->wq_lock);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
}
static const char *hsynd_str(u8 synd)
@@ -269,6 +270,20 @@ static unsigned long get_next_poll_jiffies(void)
return next;
}
+void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
+
+ spin_lock_irqsave(&health->wq_lock, flags);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock_irqrestore(&health->wq_lock, flags);
+}
+
static void poll_health(unsigned long data)
{
struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
@@ -297,13 +312,7 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- spin_lock(&health->wq_lock);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
- queue_work(health->wq, &health->work);
- else
- dev_err(&dev->pdev->dev,
- "new health works are not permitted at this stage\n");
- spin_unlock(&health->wq_lock);
+ mlx5_trigger_health_work(dev);
}
}
@@ -333,10 +342,11 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
- spin_lock(&health->wq_lock);
+ spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock(&health->wq_lock);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
cancel_delayed_work_sync(&health->recover_work);
cancel_work_sync(&health->work);
}
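The health.c hunks switch wq_lock from plain spin_lock() to the irqsave variants because the lock is now also taken from interrupt context (the poll timer path and callers of the new mlx5_trigger_health_work()); a process-context holder that left interrupts enabled could be interrupted on the same CPU by a path spinning on the same lock, deadlocking. A minimal sketch of the rule (names are illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(wq_lock);

/* Process context: disable local interrupts while holding a lock that an
 * interrupt-context path also takes; flags preserves the previous IRQ
 * state so nested sections restore correctly.
 */
static void process_side(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_lock, flags);
	/* ... test MLX5_DROP_NEW_HEALTH_WORK and queue/cancel work ... */
	spin_unlock_irqrestore(&wq_lock, flags);
}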
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
index cc1858752e70..22ca59145e6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
@@ -102,7 +102,7 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
void *qpc;
inlen = MLX5_ST_SZ_BYTES(create_qp_in);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index fe5546bb4153..361cd112bb5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -56,6 +56,7 @@
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
+#include "fpga/core.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -1106,10 +1107,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_disable_msix;
}
+ err = mlx5_fpga_device_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "fpga device init failed %d\n", err);
+ goto err_put_uars;
+ }
+
err = mlx5_start_eqs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
- goto err_put_uars;
+ goto err_fpga_init;
}
err = alloc_comp_eqs(dev);
@@ -1140,6 +1147,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_sriov;
}
+ err = mlx5_fpga_device_start(dev);
+ if (err) {
+ dev_err(&pdev->dev, "fpga device start failed %d\n", err);
+ goto err_reg_dev;
+ }
+
if (mlx5_device_registered(dev)) {
mlx5_attach_device(dev);
} else {
@@ -1175,6 +1188,9 @@ err_affinity_hints:
err_stop_eqs:
mlx5_stop_eqs(dev);
+err_fpga_init:
+ mlx5_fpga_device_cleanup(dev);
+
err_put_uars:
mlx5_put_uars_page(dev, priv->uar);
@@ -1239,6 +1255,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
+ mlx5_fpga_device_cleanup(dev);
mlx5_put_uars_page(dev, priv->uar);
mlx5_disable_msix(dev);
if (cleanup)
@@ -1513,6 +1530,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */
{ PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */
{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ 0, }
};
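mlx5_fpga_device_init() slots in between UAR allocation and EQ start, and the unwind path mirrors that order exactly: a failure in mlx5_start_eqs() now jumps to err_fpga_init, which falls through to err_put_uars, so only the stages that completed get torn down. A stand-alone sketch of this goto-unwind idiom (stage names are illustrative):

#include <stdio.h>

static int stage_a_init(void)  { puts("A up");    return 0; }
static void stage_a_fini(void) { puts("A down"); }
static int stage_b_init(void)  { puts("B up");    return 0; }
static void stage_b_fini(void) { puts("B down"); }
static int stage_c_init(void)  { puts("C fails"); return -1; }

static int load_one(void)
{
	int err;

	err = stage_a_init();
	if (err)
		return err;

	err = stage_b_init();
	if (err)
		goto err_a;

	err = stage_c_init();
	if (err)
		goto err_b;

	return 0;

err_b:			/* labels fall through in reverse init order */
	stage_b_fini();
err_a:
	stage_a_fini();
	return err;
}

int main(void)
{
	return load_one() ? 1 : 0;	/* prints: A up, B up, C fails, B down, A down */
}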
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fbc6e9e9e305..cf69b42278df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,8 +39,7 @@
#include <linux/if_link.h>
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0-1"
-#define DRIVER_RELDATE "January 2015"
+#define DRIVER_VERSION "5.0-0"
#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index a57d5a81eb05..efcded7ca27a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -279,7 +279,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int i;
inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
@@ -376,7 +376,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
*nclaimed = 0;
outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 141583daf5a2..1975d4388d4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -47,8 +47,8 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
u32 *in = NULL;
void *data;
- in = mlx5_vzalloc(inlen);
- out = mlx5_vzalloc(outlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!in || !out)
goto out;
@@ -454,7 +454,7 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
u32 *in;
int err;
- in = mlx5_vzalloc(sz);
+ in = kvzalloc(sz, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index cbbcef2884be..573a6b27fed8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -527,7 +527,7 @@ int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
void *out;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 3099630015d7..f774de6f5fcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -162,7 +162,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -221,7 +221,7 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
void *srqc;
int err;
- srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
+ srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
if (!srq_out)
return -ENOMEM;
@@ -256,7 +256,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -320,7 +320,7 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
void *xrc_srqc;
int err;
- xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
if (!xrcsrq_out)
return -ENOMEM;
memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
@@ -357,7 +357,7 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
- create_in = mlx5_vzalloc(inlen);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
if (!create_in)
return -ENOMEM;
@@ -390,7 +390,7 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
void *bitmask;
int err;
- in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -417,7 +417,7 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
void *rmpc;
int err;
- rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL);
if (!rmp_out)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index a00ff49eec18..5e128d7a9ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -284,7 +284,7 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
void *bitmask;
int err;
- in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 15c2294dd2b4..06019d00ab7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -172,7 +172,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u8 *out_addr;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -197,11 +197,9 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
void *nic_vport_ctx;
u8 *perm_mac;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in,
field_select.permanent_address, 1);
@@ -231,7 +229,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
u32 *out;
int err;
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -251,7 +249,7 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
void *in;
int err;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -501,7 +499,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -521,7 +519,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -551,7 +549,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
return -EOPNOTSUPP;
- in = mlx5_vzalloc(inlen);
+ in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -577,7 +575,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
- out = mlx5_vzalloc(outlen);
+ out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
@@ -879,11 +877,9 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_err(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
MLX5_SET(modify_nic_vport_context_in, in,
@@ -913,11 +909,9 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
int err;
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
@@ -952,7 +946,7 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
int err;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
- in = mlx5_vzalloc(in_sz);
+ in = kvzalloc(in_sz, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Kconfig b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
new file mode 100644
index 000000000000..56b60ac7bc34
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
@@ -0,0 +1,6 @@
+#
+# Mellanox firmware flash library configuration
+#
+
+config MLXFW
+ tristate "mlxfw" if COMPILE_TEST
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Makefile b/drivers/net/ethernet/mellanox/mlxfw/Makefile
new file mode 100644
index 000000000000..7448b301104c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MLXFW) += mlxfw.o
+mlxfw-objs := mlxfw_fsm.o mlxfw_mfa2_tlv_multi.o mlxfw_mfa2.o
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
new file mode 100644
index 000000000000..beea4ba83495
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -0,0 +1,102 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_H
+#define _MLXFW_H
+
+#include <linux/firmware.h>
+
+enum mlxfw_fsm_state {
+ MLXFW_FSM_STATE_IDLE,
+ MLXFW_FSM_STATE_LOCKED,
+ MLXFW_FSM_STATE_INITIALIZE,
+ MLXFW_FSM_STATE_DOWNLOAD,
+ MLXFW_FSM_STATE_VERIFY,
+ MLXFW_FSM_STATE_APPLY,
+ MLXFW_FSM_STATE_ACTIVATE,
+};
+
+enum mlxfw_fsm_state_err {
+ MLXFW_FSM_STATE_ERR_OK,
+ MLXFW_FSM_STATE_ERR_ERROR,
+ MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR,
+ MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE,
+ MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY,
+ MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED,
+ MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED,
+ MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE,
+ MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT,
+ MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET,
+ MLXFW_FSM_STATE_ERR_MAX,
+};
+
+struct mlxfw_dev;
+
+struct mlxfw_dev_ops {
+ int (*component_query)(struct mlxfw_dev *mlxfw_dev, u16 component_index,
+ u32 *p_max_size, u8 *p_align_bits,
+ u16 *p_max_write_size);
+
+ int (*fsm_lock)(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle);
+
+ int (*fsm_component_update)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size);
+
+ int (*fsm_block_download)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset);
+
+ int (*fsm_component_verify)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index);
+
+ int (*fsm_activate)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+
+ int (*fsm_query_state)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err);
+
+ void (*fsm_cancel)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+
+ void (*fsm_release)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+};
+
+struct mlxfw_dev {
+ const struct mlxfw_dev_ops *ops;
+ const char *psid;
+ u16 psid_size;
+};
+
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware);
+
+#endif
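A consumer of this library embeds struct mlxfw_dev, points ops at its own register-access callbacks, and hands the result to mlxfw_firmware_flash() together with the firmware blob. A hedged sketch of the wiring; my_dev, my_flash() and the callback bodies are illustrative, only the ops signatures and mlxfw_firmware_flash() come from this header:

#include <linux/string.h>
#include "mlxfw.h"

struct my_dev {
	struct mlxfw_dev mlxfw_dev;	/* callbacks recover this via container_of() */
	char psid[16];			/* board ID string */
};

/* Callback bodies elided; each would issue the device's own
 * query/lock/download/verify/activate register commands. */
static int my_component_query(struct mlxfw_dev *mlxfw_dev, u16 component_index,
			      u32 *p_max_size, u8 *p_align_bits,
			      u16 *p_max_write_size);
static int my_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle);
static int my_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				   u16 component_index, u32 component_size);
static int my_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				 u8 *data, u16 size, u32 offset);
static int my_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				   u16 component_index);
static int my_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
static int my_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
			      enum mlxfw_fsm_state *fsm_state,
			      enum mlxfw_fsm_state_err *fsm_state_err);
static void my_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
static void my_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);

static const struct mlxfw_dev_ops my_ops = {
	.component_query      = my_component_query,
	.fsm_lock             = my_fsm_lock,
	.fsm_component_update = my_fsm_component_update,
	.fsm_block_download   = my_fsm_block_download,
	.fsm_component_verify = my_fsm_component_verify,
	.fsm_activate         = my_fsm_activate,
	.fsm_query_state      = my_fsm_query_state,
	.fsm_cancel           = my_fsm_cancel,
	.fsm_release          = my_fsm_release,
};

static int my_flash(struct my_dev *dev, const struct firmware *fw)
{
	dev->mlxfw_dev.ops = &my_ops;
	dev->mlxfw_dev.psid = dev->psid;
	dev->mlxfw_dev.psid_size = strlen(dev->psid);
	return mlxfw_firmware_flash(&dev->mlxfw_dev, fw);
}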
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
new file mode 100644
index 000000000000..2cf89126fb23
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -0,0 +1,273 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "mlxfw: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include "mlxfw.h"
+#include "mlxfw_mfa2.h"
+
+#define MLXFW_FSM_STATE_WAIT_CYCLE_MS 200
+#define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000
+#define MLXFW_FSM_STATE_WAIT_ROUNDS \
+ (MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
+#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
+
+static const char * const mlxfw_fsm_state_err_str[] = {
+ [MLXFW_FSM_STATE_ERR_ERROR] =
+ "general error",
+ [MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR] =
+ "component hash mismatch",
+ [MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE] =
+ "component not applicable",
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY] =
+ "unknown key",
+ [MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED] =
+ "authentication failed",
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED] =
+ "component was not signed",
+ [MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE] =
+ "key not applicable",
+ [MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT] =
+ "bad format",
+ [MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET] =
+ "pending reset",
+ [MLXFW_FSM_STATE_ERR_MAX] =
+ "unknown error"
+};
+
+static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state fsm_state)
+{
+ enum mlxfw_fsm_state_err fsm_state_err;
+ enum mlxfw_fsm_state curr_fsm_state;
+ int times;
+ int err;
+
+ times = MLXFW_FSM_STATE_WAIT_ROUNDS;
+retry:
+ err = mlxfw_dev->ops->fsm_query_state(mlxfw_dev, fwhandle,
+ &curr_fsm_state, &fsm_state_err);
+ if (err)
+ return err;
+
+ if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+ pr_err("Firmware flash failed: %s\n",
+ mlxfw_fsm_state_err_str[fsm_state_err]);
+ return -EINVAL;
+ }
+ if (curr_fsm_state != fsm_state) {
+ if (--times == 0) {
+ pr_err("Timeout reached on FSM state change");
+ return -ETIMEDOUT;
+ }
+ msleep(MLXFW_FSM_STATE_WAIT_CYCLE_MS);
+ goto retry;
+ }
+ return 0;
+}
+
+#define MLXFW_ALIGN_DOWN(x, align_bits) ((x) & ~((1 << (align_bits)) - 1))
+#define MLXFW_ALIGN_UP(x, align_bits) \
+ MLXFW_ALIGN_DOWN((x) + ((1 << (align_bits)) - 1), (align_bits))
+
+static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ struct mlxfw_mfa2_component *comp)
+{
+ u16 comp_max_write_size;
+ u8 comp_align_bits;
+ u32 comp_max_size;
+ u16 block_size;
+ u8 *block_ptr;
+ u32 offset;
+ int err;
+
+ err = mlxfw_dev->ops->component_query(mlxfw_dev, comp->index,
+ &comp_max_size, &comp_align_bits,
+ &comp_max_write_size);
+ if (err)
+ return err;
+
+ comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
+ if (comp->data_size > comp_max_size) {
+ pr_err("Component %d is of size %d which is bigger than limit %d\n",
+ comp->index, comp->data_size, comp_max_size);
+ return -EINVAL;
+ }
+
+ comp_max_write_size = MLXFW_ALIGN_DOWN(comp_max_write_size,
+ comp_align_bits);
+
+ pr_debug("Component update\n");
+ err = mlxfw_dev->ops->fsm_component_update(mlxfw_dev, fwhandle,
+ comp->index,
+ comp->data_size);
+ if (err)
+ return err;
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_DOWNLOAD);
+ if (err)
+ goto err_out;
+
+ pr_debug("Component download\n");
+ for (offset = 0;
+ offset < MLXFW_ALIGN_UP(comp->data_size, comp_align_bits);
+ offset += comp_max_write_size) {
+ block_ptr = comp->data + offset;
+ block_size = (u16) min_t(u32, comp->data_size - offset,
+ comp_max_write_size);
+ err = mlxfw_dev->ops->fsm_block_download(mlxfw_dev, fwhandle,
+ block_ptr, block_size,
+ offset);
+ if (err)
+ goto err_out;
+ }
+
+ pr_debug("Component verify\n");
+ err = mlxfw_dev->ops->fsm_component_verify(mlxfw_dev, fwhandle,
+ comp->index);
+ if (err)
+ goto err_out;
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ mlxfw_dev->ops->fsm_cancel(mlxfw_dev, fwhandle);
+ return err;
+}
+
+static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ struct mlxfw_mfa2_file *mfa2_file)
+{
+ u32 component_count;
+ int err;
+ int i;
+
+ err = mlxfw_mfa2_file_component_count(mfa2_file, mlxfw_dev->psid,
+ mlxfw_dev->psid_size,
+ &component_count);
+ if (err) {
+ pr_err("Could not find device PSID in MFA2 file\n");
+ return err;
+ }
+
+ for (i = 0; i < component_count; i++) {
+ struct mlxfw_mfa2_component *comp;
+
+ comp = mlxfw_mfa2_file_component_get(mfa2_file, mlxfw_dev->psid,
+ mlxfw_dev->psid_size, i);
+ if (IS_ERR(comp))
+ return PTR_ERR(comp);
+
+ pr_info("Flashing component type %d\n", comp->index);
+ err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp);
+ mlxfw_mfa2_file_component_put(comp);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware)
+{
+ struct mlxfw_mfa2_file *mfa2_file;
+ u32 fwhandle;
+ int err;
+
+ if (!mlxfw_mfa2_check(firmware)) {
+ pr_err("Firmware file is not MFA2\n");
+ return -EINVAL;
+ }
+
+ mfa2_file = mlxfw_mfa2_file_init(firmware);
+ if (IS_ERR(mfa2_file))
+ return PTR_ERR(mfa2_file);
+
+ pr_info("Initialize firmware flash process\n");
+ err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle);
+ if (err) {
+ pr_err("Could not lock the firmware FSM\n");
+ goto err_fsm_lock;
+ }
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_state_wait_idle_to_locked;
+
+ err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file);
+ if (err)
+ goto err_flash_components;
+
+ pr_debug("Activate image\n");
+ err = mlxfw_dev->ops->fsm_activate(mlxfw_dev, fwhandle);
+ if (err) {
+ pr_err("Could not activate the downloaded image\n");
+ goto err_fsm_activate;
+ }
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle, MLXFW_FSM_STATE_LOCKED);
+ if (err)
+ goto err_state_wait_activate_to_locked;
+
+ pr_debug("Handle release\n");
+ mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
+
+ pr_info("Firmware flash done.\n");
+ mlxfw_mfa2_file_fini(mfa2_file);
+ return 0;
+
+err_state_wait_activate_to_locked:
+err_fsm_activate:
+err_flash_components:
+err_state_wait_idle_to_locked:
+ mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
+err_fsm_lock:
+ mlxfw_mfa2_file_fini(mfa2_file);
+ return err;
+}
+EXPORT_SYMBOL(mlxfw_firmware_flash);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox firmware flash lib");
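Note that MLXFW_ALIGN_DOWN/UP take the alignment as a bit count, not a byte count: the per-block write size is rounded down so every block stays aligned, while the download loop bound rounds the component size up so the final partial block is still issued. A stand-alone check of that arithmetic (macros copied in shape from the file above):

#include <assert.h>
#include <stdio.h>

#define ALIGN_DOWN_BITS(x, b) ((x) & ~((1u << (b)) - 1))
#define ALIGN_UP_BITS(x, b)   ALIGN_DOWN_BITS((x) + ((1u << (b)) - 1), (b))

int main(void)
{
	/* align_bits = 2 means 4-byte alignment */
	assert(ALIGN_DOWN_BITS(1030u, 2) == 1028u);	/* max write size trimmed  */
	assert(ALIGN_UP_BITS(1030u, 2) == 1032u);	/* loop bound padded up    */
	printf("alignment arithmetic verified\n");
	return 0;
}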
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
new file mode 100644
index 000000000000..7e9589061d30
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -0,0 +1,620 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "mlxfw_mfa2: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/xz.h>
+#include "mlxfw_mfa2.h"
+#include "mlxfw_mfa2_file.h"
+#include "mlxfw_mfa2_tlv.h"
+#include "mlxfw_mfa2_format.h"
+#include "mlxfw_mfa2_tlv_multi.h"
+
+/* MFA2 FILE
+ * +----------------------------------+
+ * | MFA2 finger print |
+ * +----------------------------------+
+ * | package descriptor multi_tlv |
+ * | +------------------------------+ | +-----------------+
+ * | | package descriptor tlv +-----> |num_devices=n |
+ * | +------------------------------+ | |num_components=m |
+ * +----------------------------------+ |CB offset |
+ * | device descriptor multi_tlv | |... |
+ * | +------------------------------+ | | |
+ * | | PSID tlv | | +-----------------+
+ * | +------------------------------+ |
+ * | | component index tlv | |
+ * | +------------------------------+ |
+ * +----------------------------------+
+ * | component descriptor multi_tlv |
+ * | +------------------------------+ | +-----------------+
+ * | | component descriptor tlv +-----> |Among others: |
+ * | +------------------------------+ | |CB offset=o |
+ * +----------------------------------+ |comp index=i |
+ * | | |... |
+ * | | | |
+ * | | +-----------------+
+ * | COMPONENT BLOCK (CB) |
+ * | |
+ * | |
+ * | |
+ * +----------------------------------+
+ *
+ * On the top level, an MFA2 file contains:
+ * - Fingerprint
+ * - Several multi_tlvs (TLVs of type MLXFW_MFA2_TLV_MULTI_PART, as defined
+ *   in mlxfw_mfa2_format.h)
+ * - Compressed content block
+ *
+ * The first multi_tlv
+ * -------------------
+ * The first multi TLV is treated as the package descriptor, and is expected
+ * to have a first TLV child of type MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR, which
+ * contains all the global information needed to parse the file. Among other
+ * things, it holds the number of device descriptors and component descriptors
+ * that follow this multi TLV.
+ *
+ * The device descriptor multi_tlv
+ * -------------------------------
+ * The multi TLVs following the package descriptor are treated as device
+ * descriptors, and are expected to have the following children:
+ * - PSID TLV child of type MLXFW_MFA2_TLV_PSID containing that device's PSID.
+ * - Component index of type MLXFW_MFA2_TLV_COMPONENT_PTR that contains that
+ *   device's component index.
+ *
+ * The component descriptor multi_tlv
+ * ----------------------------------
+ * The multi TLVs following the device descriptor multi TLVs are treated as
+ * component descriptors, and are expected to have a first child of type
+ * MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR that contains, among other things, the
+ * component index needed for the flash process and the offset of the binary
+ * within the component block.
+ */
+
+static const u8 mlxfw_mfa2_fingerprint[] = "MLNX.MFA2.XZ.00!";
+static const int mlxfw_mfa2_fingerprint_len =
+ sizeof(mlxfw_mfa2_fingerprint) - 1;
+
+static const u8 mlxfw_mfa2_comp_magic[] = "#BIN.COMPONENT!#";
+static const int mlxfw_mfa2_comp_magic_len = sizeof(mlxfw_mfa2_comp_magic) - 1;
+
+bool mlxfw_mfa2_check(const struct firmware *fw)
+{
+ if (fw->size < sizeof(mlxfw_mfa2_fingerprint))
+ return false;
+
+ return memcmp(fw->data, mlxfw_mfa2_fingerprint,
+ mlxfw_mfa2_fingerprint_len) == 0;
+}
+
+static bool
+mlxfw_mfa2_tlv_multi_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ /* Check that all children are valid */
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("Multi has invalid child");
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool
+mlxfw_mfa2_file_dev_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *dev_tlv,
+ u16 dev_idx)
+{
+ const struct mlxfw_mfa2_tlv_component_ptr *cptr;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv_psid *psid;
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 cptr_count;
+ u16 cptr_idx;
+ int err;
+
+ pr_debug("Device %d\n", dev_idx);
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, dev_tlv);
+ if (!multi) {
+ pr_err("Device %d is not a valid TLV error\n", dev_idx);
+ return false;
+ }
+
+ if (!mlxfw_mfa2_tlv_multi_validate(mfa2_file, multi))
+ return false;
+
+ /* Validate that the device has a PSID TLV */
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, multi,
+ MLXFW_MFA2_TLV_PSID, 0);
+ if (!tlv) {
+ pr_err("Device %d does not have PSID\n", dev_idx);
+ return false;
+ }
+
+ psid = mlxfw_mfa2_tlv_psid_get(mfa2_file, tlv);
+ if (!psid) {
+ pr_err("Device %d PSID TLV is not valid\n", dev_idx);
+ return false;
+ }
+
+ print_hex_dump_debug(" -- Device PSID ", DUMP_PREFIX_NONE, 16, 16,
+ psid->psid, be16_to_cpu(tlv->len), true);
+
+ /* Validate that the device has a COMPONENT_PTR */
+ err = mlxfw_mfa2_tlv_multi_child_count(mfa2_file, multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ &cptr_count);
+ if (err)
+ return false;
+
+ if (cptr_count == 0) {
+ pr_err("Device %d has no components\n", dev_idx);
+ return false;
+ }
+
+ for (cptr_idx = 0; cptr_idx < cptr_count; cptr_idx++) {
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ cptr_idx);
+ if (!tlv)
+ return false;
+
+ cptr = mlxfw_mfa2_tlv_component_ptr_get(mfa2_file, tlv);
+ if (!cptr) {
+ pr_err("Device %d COMPONENT_PTR TLV is not valid\n",
+ dev_idx);
+ return false;
+ }
+
+ pr_debug(" -- Component index %d\n",
+ be16_to_cpu(cptr->component_index));
+ }
+ return true;
+}
+
+static bool
+mlxfw_mfa2_file_comp_validate(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *comp_tlv,
+ u16 comp_idx)
+{
+ const struct mlxfw_mfa2_tlv_component_descriptor *cdesc;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *tlv;
+
+ pr_debug("Component %d\n", comp_idx);
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, comp_tlv);
+ if (!multi) {
+ pr_err("Component %d is not a valid TLV error\n", comp_idx);
+ return false;
+ }
+
+ if (!mlxfw_mfa2_tlv_multi_validate(mfa2_file, multi))
+ return false;
+
+ /* Check that the component has a COMPONENT_DESCRIPTOR as its first child */
+ tlv = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!tlv) {
+ pr_err("Component descriptor %d multi TLV error\n", comp_idx);
+ return false;
+ }
+
+ cdesc = mlxfw_mfa2_tlv_component_descriptor_get(mfa2_file, tlv);
+ if (!cdesc) {
+ pr_err("Component %d does not have a valid descriptor\n",
+ comp_idx);
+ return false;
+ }
+ pr_debug(" -- Component type %d\n", be16_to_cpu(cdesc->identifier));
+ pr_debug(" -- Offset 0x%llx and size %d\n",
+ ((u64) be32_to_cpu(cdesc->cb_offset_h) << 32)
+ | be32_to_cpu(cdesc->cb_offset_l), be32_to_cpu(cdesc->size));
+
+ return true;
+}
+
+static bool mlxfw_mfa2_file_validate(const struct mlxfw_mfa2_file *mfa2_file)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ pr_debug("Validating file\n");
+
+ /* check that all the devices exist */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, mfa2_file->first_dev,
+ mfa2_file->dev_count) {
+ if (!tlv) {
+ pr_err("Device TLV error\n");
+ return false;
+ }
+
+ /* Check each device */
+ if (!mlxfw_mfa2_file_dev_validate(mfa2_file, tlv, idx))
+ return false;
+ }
+
+ /* check that all the components exist */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, mfa2_file->first_component,
+ mfa2_file->component_count) {
+ if (!tlv) {
+ pr_err("Device TLV error\n");
+ return false;
+ }
+
+ /* Check each component */
+ if (!mlxfw_mfa2_file_comp_validate(mfa2_file, tlv, idx))
+ return false;
+ }
+ return true;
+}
+
+struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw)
+{
+ const struct mlxfw_mfa2_tlv_package_descriptor *pd;
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *multi_child;
+ const struct mlxfw_mfa2_tlv *first_tlv;
+ struct mlxfw_mfa2_file *mfa2_file;
+ const void *first_tlv_ptr;
+ const void *cb_top_ptr;
+
+ mfa2_file = kcalloc(1, sizeof(*mfa2_file), GFP_KERNEL);
+ if (!mfa2_file)
+ return ERR_PTR(-ENOMEM);
+
+ mfa2_file->fw = fw;
+ first_tlv_ptr = fw->data + NLA_ALIGN(mlxfw_mfa2_fingerprint_len);
+ first_tlv = mlxfw_mfa2_tlv_get(mfa2_file, first_tlv_ptr);
+ if (!first_tlv) {
+ pr_err("Could not parse package descriptor TLV\n");
+ goto err_out;
+ }
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, first_tlv);
+ if (!multi) {
+ pr_err("First TLV is not of valid multi type\n");
+ goto err_out;
+ }
+
+ multi_child = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!multi_child)
+ goto err_out;
+
+ pd = mlxfw_mfa2_tlv_package_descriptor_get(mfa2_file, multi_child);
+ if (!pd) {
+ pr_err("Could not parse package descriptor TLV\n");
+ goto err_out;
+ }
+
+ mfa2_file->first_dev = mlxfw_mfa2_tlv_next(mfa2_file, first_tlv);
+ if (!mfa2_file->first_dev) {
+ pr_err("First device TLV is not valid\n");
+ goto err_out;
+ }
+
+ mfa2_file->dev_count = be16_to_cpu(pd->num_devices);
+ mfa2_file->first_component = mlxfw_mfa2_tlv_advance(mfa2_file,
+ mfa2_file->first_dev,
+ mfa2_file->dev_count);
+ mfa2_file->component_count = be16_to_cpu(pd->num_components);
+ mfa2_file->cb = fw->data + NLA_ALIGN(be32_to_cpu(pd->cb_offset));
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, mfa2_file->cb)) {
+ pr_err("Component block is out side the file\n");
+ goto err_out;
+ }
+ mfa2_file->cb_archive_size = be32_to_cpu(pd->cb_archive_size);
+ cb_top_ptr = mfa2_file->cb + mfa2_file->cb_archive_size - 1;
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, cb_top_ptr)) {
+ pr_err("Component block size is too big\n");
+ goto err_out;
+ }
+
+ if (!mlxfw_mfa2_file_validate(mfa2_file))
+ goto err_out;
+ return mfa2_file;
+err_out:
+ kfree(mfa2_file);
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct mlxfw_mfa2_tlv_multi *
+mlxfw_mfa2_tlv_dev_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u16 psid_size)
+{
+ const struct mlxfw_mfa2_tlv_psid *tlv_psid;
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ const struct mlxfw_mfa2_tlv *dev_tlv;
+ const struct mlxfw_mfa2_tlv *tlv;
+ u32 idx;
+
+ /* for each device tlv */
+ mlxfw_mfa2_tlv_foreach(mfa2_file, dev_tlv, idx, mfa2_file->first_dev,
+ mfa2_file->dev_count) {
+ if (!dev_tlv)
+ return NULL;
+
+ dev_multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, dev_tlv);
+ if (!dev_multi)
+ return NULL;
+
+ /* find psid child and compare */
+ tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_PSID, 0);
+ if (!tlv)
+ return NULL;
+ if (be16_to_cpu(tlv->len) != psid_size)
+ continue;
+
+ tlv_psid = mlxfw_mfa2_tlv_psid_get(mfa2_file, tlv);
+ if (!tlv_psid)
+ return NULL;
+
+ if (memcmp(psid, tlv_psid->psid, psid_size) == 0)
+ return dev_multi;
+ }
+
+ return NULL;
+}
+
+int mlxfw_mfa2_file_component_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u32 psid_size,
+ u32 *p_count)
+{
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ u16 count;
+ int err;
+
+ dev_multi = mlxfw_mfa2_tlv_dev_get(mfa2_file, psid, psid_size);
+ if (!dev_multi)
+ return -EINVAL;
+
+ err = mlxfw_mfa2_tlv_multi_child_count(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ &count);
+ if (err)
+ return err;
+
+ *p_count = count;
+ return 0;
+}
+
+static int mlxfw_mfa2_xz_dec_run(struct xz_dec *xz_dec, struct xz_buf *xz_buf,
+ bool *finished)
+{
+ enum xz_ret xz_ret;
+
+ xz_ret = xz_dec_run(xz_dec, xz_buf);
+
+ switch (xz_ret) {
+ case XZ_STREAM_END:
+ *finished = true;
+ return 0;
+ case XZ_OK:
+ *finished = false;
+ return 0;
+ case XZ_MEM_ERROR:
+ pr_err("xz no memory\n");
+ return -ENOMEM;
+ case XZ_DATA_ERROR:
+ pr_err("xz file corrupted\n");
+ return -EINVAL;
+ case XZ_FORMAT_ERROR:
+ pr_err("xz format not found\n");
+ return -EINVAL;
+ case XZ_OPTIONS_ERROR:
+ pr_err("unsupported xz option\n");
+ return -EINVAL;
+ case XZ_MEMLIMIT_ERROR:
+ pr_err("xz dictionary too small\n");
+ return -EINVAL;
+ default:
+ pr_err("xz error %d\n", xz_ret);
+ return -EINVAL;
+ }
+}
+
+static int mlxfw_mfa2_file_cb_offset_xz(const struct mlxfw_mfa2_file *mfa2_file,
+ off_t off, size_t size, u8 *buf)
+{
+ struct xz_dec *xz_dec;
+ struct xz_buf dec_buf;
+ off_t curr_off = 0;
+ bool finished;
+ int err;
+
+ xz_dec = xz_dec_init(XZ_DYNALLOC, (u32) -1);
+ if (!xz_dec)
+ return -EINVAL;
+
+ dec_buf.in_size = mfa2_file->cb_archive_size;
+ dec_buf.in = mfa2_file->cb;
+ dec_buf.in_pos = 0;
+ dec_buf.out = buf;
+
+ /* decode up to the offset */
+ do {
+ dec_buf.out_pos = 0;
+ dec_buf.out_size = min_t(size_t, size, off - curr_off);
+ if (dec_buf.out_size == 0)
+ break;
+
+ err = mlxfw_mfa2_xz_dec_run(xz_dec, &dec_buf, &finished);
+ if (err)
+ goto out;
+ if (finished) {
+ pr_err("xz section too short\n");
+ err = -EINVAL;
+ goto out;
+ }
+ curr_off += dec_buf.out_pos;
+ } while (curr_off != off);
+
+ /* decode the needed section */
+ dec_buf.out_pos = 0;
+ dec_buf.out_size = size;
+ err = mlxfw_mfa2_xz_dec_run(xz_dec, &dec_buf, &finished);
+ if (err)
+ goto out;
+out:
+ xz_dec_end(xz_dec);
+ return err;
+}
+
+static const struct mlxfw_mfa2_tlv_component_descriptor *
+mlxfw_mfa2_file_component_tlv_get(const struct mlxfw_mfa2_file *mfa2_file,
+ u16 comp_index)
+{
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ const struct mlxfw_mfa2_tlv *multi_child;
+ const struct mlxfw_mfa2_tlv *comp_tlv;
+
+ if (comp_index >= mfa2_file->component_count) /* indices are 0-based */
+ return NULL;
+
+ comp_tlv = mlxfw_mfa2_tlv_advance(mfa2_file, mfa2_file->first_component,
+ comp_index);
+ if (!comp_tlv)
+ return NULL;
+
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, comp_tlv);
+ if (!multi)
+ return NULL;
+
+ multi_child = mlxfw_mfa2_tlv_multi_child(mfa2_file, multi);
+ if (!multi_child)
+ return NULL;
+
+ return mlxfw_mfa2_tlv_component_descriptor_get(mfa2_file, multi_child);
+}
+
+struct mlxfw_mfa2_comp_data {
+ struct mlxfw_mfa2_component comp;
+ u8 buff[0];
+};
+
+static const struct mlxfw_mfa2_tlv_component_descriptor *
+mlxfw_mfa2_file_component_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index)
+{
+ const struct mlxfw_mfa2_tlv_component_ptr *cptr;
+ const struct mlxfw_mfa2_tlv_multi *dev_multi;
+ const struct mlxfw_mfa2_tlv *cptr_tlv;
+ u16 comp_idx;
+
+ dev_multi = mlxfw_mfa2_tlv_dev_get(mfa2_file, psid, psid_size);
+ if (!dev_multi)
+ return NULL;
+
+ cptr_tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, dev_multi,
+ MLXFW_MFA2_TLV_COMPONENT_PTR,
+ component_index);
+ if (!cptr_tlv)
+ return NULL;
+
+ cptr = mlxfw_mfa2_tlv_component_ptr_get(mfa2_file, cptr_tlv);
+ if (!cptr)
+ return NULL;
+
+ comp_idx = be16_to_cpu(cptr->component_index);
+ return mlxfw_mfa2_file_component_tlv_get(mfa2_file, comp_idx);
+}
+
+struct mlxfw_mfa2_component *
+mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index)
+{
+ const struct mlxfw_mfa2_tlv_component_descriptor *comp;
+ struct mlxfw_mfa2_comp_data *comp_data;
+ u32 comp_buf_size;
+ off_t cb_offset;
+ u32 comp_size;
+ int err;
+
+ comp = mlxfw_mfa2_file_component_find(mfa2_file, psid, psid_size,
+ component_index);
+ if (!comp)
+ return ERR_PTR(-EINVAL);
+
+ cb_offset = (u64) be32_to_cpu(comp->cb_offset_h) << 32 |
+ be32_to_cpu(comp->cb_offset_l);
+ comp_size = be32_to_cpu(comp->size);
+ comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
+
+ comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+ if (!comp_data)
+ return ERR_PTR(-ENOMEM);
+ comp_data->comp.data_size = comp_size;
+ comp_data->comp.index = be16_to_cpu(comp->identifier);
+ err = mlxfw_mfa2_file_cb_offset_xz(mfa2_file, cb_offset, comp_buf_size,
+ comp_data->buff);
+ if (err) {
+ pr_err("Component could not be reached in CB\n");
+ goto err_out;
+ }
+
+ if (memcmp(comp_data->buff, mlxfw_mfa2_comp_magic,
+ mlxfw_mfa2_comp_magic_len) != 0) {
+ pr_err("Component has wrong magic\n");
+ err = -EINVAL; /* err is 0 on this path; a bare goto would return ERR_PTR(0) */
+ goto err_out;
+ }
+
+ comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
+ return &comp_data->comp;
+err_out:
+ kfree(comp_data);
+ return ERR_PTR(err);
+}
+
+void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
+{
+ const struct mlxfw_mfa2_comp_data *comp_data;
+
+ comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
+ kfree(comp_data);
+}
+
+void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
+{
+ kfree(mfa2_file);
+}
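The parser above walks a flat sequence of TLVs, each headed by a one-byte version, a one-byte type and a big-endian length, bounds-checking every pointer against the firmware image before dereferencing it. A stand-alone model of that walk (the buffer contents are made up, and the payload-length interpretation follows the PSID hex dump above; both are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs() for the big-endian length */

/* Mirrors struct mlxfw_mfa2_tlv: u8 version, u8 type, __be16 len, data[] */
struct tlv_hdr {
	uint8_t  version;
	uint8_t  type;
	uint16_t len;	/* big-endian payload length */
};

int main(void)
{
	/* Two hand-built TLVs: type 0x2A (PSID, 4 bytes) and type 0x22 (2 bytes) */
	uint8_t buf[] = {
		0, 0x2a, 0x00, 0x04, 'M', 'T', '0', '1',
		0, 0x22, 0x00, 0x02, 0xab, 0xcd,
	};
	size_t off = 0;

	while (off + sizeof(struct tlv_hdr) <= sizeof(buf)) {
		struct tlv_hdr h;

		memcpy(&h, buf + off, sizeof(h));	/* avoid unaligned reads */
		printf("type 0x%02x, payload %u bytes\n", h.type, ntohs(h.len));
		off += sizeof(h) + ntohs(h.len);
	}
	return 0;
}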
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
new file mode 100644
index 000000000000..20472aa139cd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_H
+#define _MLXFW_MFA2_H
+
+#include <linux/firmware.h>
+#include "mlxfw.h"
+
+struct mlxfw_mfa2_component {
+ u16 index;
+ u32 data_size;
+ u8 *data;
+};
+
+struct mlxfw_mfa2_file;
+
+bool mlxfw_mfa2_check(const struct firmware *fw);
+
+struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw);
+
+int mlxfw_mfa2_file_component_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, u32 psid_size,
+ u32 *p_count);
+
+struct mlxfw_mfa2_component *
+mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const char *psid, int psid_size,
+ int component_index);
+
+void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *component);
+
+void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
new file mode 100644
index 000000000000..f667942b1ea3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
@@ -0,0 +1,60 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_FILE_H
+#define _MLXFW_MFA2_FILE_H
+
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+
+struct mlxfw_mfa2_file {
+ const struct firmware *fw;
+ const struct mlxfw_mfa2_tlv *first_dev;
+ u16 dev_count;
+ const struct mlxfw_mfa2_tlv *first_component;
+ u16 component_count;
+ const void *cb; /* components block */
+ u32 cb_archive_size; /* size of compressed components block */
+};
+
+static inline bool mlxfw_mfa2_valid_ptr(const struct mlxfw_mfa2_file *mfa2_file,
+ const void *ptr)
+{
+ const void *valid_to = mfa2_file->fw->data + mfa2_file->fw->size;
+ const void *valid_from = mfa2_file->fw->data;
+
+ return ptr > valid_from && ptr < valid_to;
+}
+
+#endif
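mlxfw_mfa2_valid_ptr() is exclusive at both ends, which is why mlxfw_mfa2_file_init() probes the last byte of the component block (cb + cb_archive_size - 1) rather than one past the end, and why a TLV at the very first byte of the image would be rejected (in practice the first TLV always sits after the aligned fingerprint). A stand-alone model of the check:

#include <assert.h>

static int valid_ptr(const char *from, const char *to, const char *p)
{
	/* exclusive on both ends, mirroring ptr > valid_from && ptr < valid_to */
	return p > from && p < to;
}

int main(void)
{
	char fw[16];

	assert(!valid_ptr(fw, fw + 16, fw));		/* first byte rejected    */
	assert(valid_ptr(fw, fw + 16, fw + 15));	/* last byte accepted     */
	assert(!valid_ptr(fw, fw + 16, fw + 16));	/* one-past-end rejected  */
	return 0;
}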
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
new file mode 100644
index 000000000000..dd66737c033d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXFW_MFA2_FORMAT_H
+#define _MLXFW_MFA2_FORMAT_H
+
+#include "mlxfw_mfa2_file.h"
+#include "mlxfw_mfa2_tlv.h"
+
+enum mlxfw_mfa2_tlv_type {
+ MLXFW_MFA2_TLV_MULTI_PART = 0x01,
+ MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR = 0x02,
+ MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR = 0x04,
+ MLXFW_MFA2_TLV_COMPONENT_PTR = 0x22,
+ MLXFW_MFA2_TLV_PSID = 0x2A,
+};
+
+enum mlxfw_mfa2_compression_type {
+ MLXFW_MFA2_COMPRESSION_TYPE_NONE,
+ MLXFW_MFA2_COMPRESSION_TYPE_XZ,
+};
+
+struct mlxfw_mfa2_tlv_package_descriptor {
+ __be16 num_components;
+ __be16 num_devices;
+ __be32 cb_offset;
+ __be32 cb_archive_size;
+ __be32 cb_size_h;
+ __be32 cb_size_l;
+ u8 padding[3];
+ u8 cv_compression;
+ __be32 user_data_offset;
+} __packed;
+
+MLXFW_MFA2_TLV(package_descriptor, struct mlxfw_mfa2_tlv_package_descriptor,
+ MLXFW_MFA2_TLV_PACKAGE_DESCRIPTOR);
+
+struct mlxfw_mfa2_tlv_multi {
+ __be16 num_extensions;
+ __be16 total_len;
+} __packed;
+
+MLXFW_MFA2_TLV(multi, struct mlxfw_mfa2_tlv_multi,
+ MLXFW_MFA2_TLV_MULTI_PART);
+
+struct mlxfw_mfa2_tlv_psid {
+ u8 psid[0];
+} __packed;
+
+MLXFW_MFA2_TLV_VARSIZE(psid, struct mlxfw_mfa2_tlv_psid,
+ MLXFW_MFA2_TLV_PSID);
+
+struct mlxfw_mfa2_tlv_component_ptr {
+ __be16 storage_id;
+ __be16 component_index;
+ __be32 storage_address;
+} __packed;
+
+MLXFW_MFA2_TLV(component_ptr, struct mlxfw_mfa2_tlv_component_ptr,
+ MLXFW_MFA2_TLV_COMPONENT_PTR);
+
+struct mlxfw_mfa2_tlv_component_descriptor {
+ __be16 pldm_classification;
+ __be16 identifier;
+ __be32 cb_offset_h;
+ __be32 cb_offset_l;
+ __be32 size;
+} __packed;
+
+MLXFW_MFA2_TLV(component_descriptor, struct mlxfw_mfa2_tlv_component_descriptor,
+ MLXFW_MFA2_TLV_COMPONENT_DESCRIPTOR);
+
+#endif
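The 64-bit fields in these descriptors (cb_offset, cb_size) are carried as separate big-endian high/low 32-bit halves; consumers such as mlxfw_mfa2_file_component_get() recombine them as ((u64)be32_to_cpu(h) << 32) | be32_to_cpu(l). A stand-alone check of the recombination (byte-swapping assumed already done):

#include <assert.h>
#include <stdint.h>

static uint64_t combine_halves(uint32_t high, uint32_t low)
{
	/* same shape as ((u64)be32_to_cpu(h) << 32) | be32_to_cpu(l) */
	return ((uint64_t)high << 32) | low;
}

int main(void)
{
	assert(combine_halves(0x00000001u, 0x00000010u) == 0x100000010ULL);
	assert(combine_halves(0u, 0xffffffffu) == 0xffffffffULL);
	return 0;
}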
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
new file mode 100644
index 000000000000..cc013e77b326
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
@@ -0,0 +1,98 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXFW_MFA2_TLV_H
+#define _MLXFW_MFA2_TLV_H
+
+#include <linux/kernel.h>
+#include "mlxfw_mfa2_file.h"
+
+struct mlxfw_mfa2_tlv {
+ u8 version;
+ u8 type;
+ __be16 len;
+ u8 data[0];
+} __packed;
+
+static inline const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_get(const struct mlxfw_mfa2_file *mfa2_file, const void *ptr)
+{
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, ptr) ||
+ !mlxfw_mfa2_valid_ptr(mfa2_file, ptr + sizeof(struct mlxfw_mfa2_tlv)))
+ return NULL;
+ return ptr;
+}
+
+static inline const void *
+mlxfw_mfa2_tlv_payload_get(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv, u8 payload_type,
+ size_t payload_size, bool varsize)
+{
+ void *tlv_top;
+
+ tlv_top = (void *) tlv + be16_to_cpu(tlv->len) - 1;
+ if (!mlxfw_mfa2_valid_ptr(mfa2_file, tlv) ||
+ !mlxfw_mfa2_valid_ptr(mfa2_file, tlv_top))
+ return NULL;
+ if (tlv->type != payload_type)
+ return NULL;
+ if (varsize && (be16_to_cpu(tlv->len) < payload_size))
+ return NULL;
+ if (!varsize && (be16_to_cpu(tlv->len) != payload_size))
+ return NULL;
+
+ return tlv->data;
+}
+
+#define MLXFW_MFA2_TLV(name, payload_type, tlv_type) \
+static inline const payload_type * \
+mlxfw_mfa2_tlv_ ## name ## _get(const struct mlxfw_mfa2_file *mfa2_file, \
+ const struct mlxfw_mfa2_tlv *tlv) \
+{ \
+ return mlxfw_mfa2_tlv_payload_get(mfa2_file, tlv, \
+ tlv_type, sizeof(payload_type), \
+ false); \
+}
+
+#define MLXFW_MFA2_TLV_VARSIZE(name, payload_type, tlv_type) \
+static inline const payload_type * \
+mlxfw_mfa2_tlv_ ## name ## _get(const struct mlxfw_mfa2_file *mfa2_file, \
+ const struct mlxfw_mfa2_tlv *tlv) \
+{ \
+ return mlxfw_mfa2_tlv_payload_get(mfa2_file, tlv, \
+ tlv_type, sizeof(payload_type), \
+ true); \
+}
+
+#endif
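
(Editorial aside: for readers unfamiliar with token-pasting macros, the
MLXFW_MFA2_TLV(multi, ...) invocation in mlxfw_mfa2_format.h expands to a
typed accessor equivalent to the sketch below — a checked cast that returns
NULL on a type or size mismatch:)

static inline const struct mlxfw_mfa2_tlv_multi *
mlxfw_mfa2_tlv_multi_get(const struct mlxfw_mfa2_file *mfa2_file,
                         const struct mlxfw_mfa2_tlv *tlv)
{
        return mlxfw_mfa2_tlv_payload_get(mfa2_file, tlv,
                                          MLXFW_MFA2_TLV_MULTI_PART,
                                          sizeof(struct mlxfw_mfa2_tlv_multi),
                                          false);
}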
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
new file mode 100644
index 000000000000..0094b92a233b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
@@ -0,0 +1,126 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) "MFA2: " fmt
+
+#include "mlxfw_mfa2_tlv_multi.h"
+#include <uapi/linux/netlink.h>
+
+#define MLXFW_MFA2_TLV_TOTAL_SIZE(tlv) \
+ NLA_ALIGN(sizeof(*(tlv)) + be16_to_cpu((tlv)->len))
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi)
+{
+ size_t multi_len;
+
+ multi_len = NLA_ALIGN(sizeof(struct mlxfw_mfa2_tlv_multi));
+ return mlxfw_mfa2_tlv_get(mfa2_file, (void *) multi + multi_len);
+}
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv)
+{
+ const struct mlxfw_mfa2_tlv_multi *multi;
+ u16 tlv_len;
+ void *next;
+
+ tlv_len = MLXFW_MFA2_TLV_TOTAL_SIZE(tlv);
+
+ if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
+ multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+ tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
+ }
+
+ next = (void *) tlv + tlv_len;
+ return mlxfw_mfa2_tlv_get(mfa2_file, next);
+}
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_advance(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *from_tlv, u16 count)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, from_tlv, count)
+ if (!tlv)
+ return NULL;
+ return tlv;
+}
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type, u16 index)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 skip = 0;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("TLV parsing error\n");
+ return NULL;
+ }
+ if (tlv->type == type)
+ if (skip++ == index)
+ return tlv;
+ }
+ return NULL;
+}
+
+int mlxfw_mfa2_tlv_multi_child_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type,
+ u16 *p_count)
+{
+ const struct mlxfw_mfa2_tlv *tlv;
+ u16 count = 0;
+ u16 idx;
+
+ mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
+ if (!tlv) {
+ pr_err("TLV parsing error\n");
+ return -EINVAL;
+ }
+
+ if (tlv->type == type)
+ count++;
+ }
+ *p_count = count;
+ return 0;
+}
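
(Editorial aside: a short usage sketch of the two lookup helpers above. The
function name and error handling are illustrative only:)

/* Hypothetical example: count the PSID children of a multi TLV and fetch
 * the first one. Both helpers validate every TLV against the file bounds.
 */
static int example_psid_lookup(const struct mlxfw_mfa2_file *mfa2_file,
                               const struct mlxfw_mfa2_tlv_multi *multi)
{
        const struct mlxfw_mfa2_tlv *tlv;
        u16 count;
        int err;

        err = mlxfw_mfa2_tlv_multi_child_count(mfa2_file, multi,
                                               MLXFW_MFA2_TLV_PSID, &count);
        if (err || !count)
                return err ? err : -ENOENT;
        tlv = mlxfw_mfa2_tlv_multi_child_find(mfa2_file, multi,
                                              MLXFW_MFA2_TLV_PSID, 0);
        return tlv ? 0 : -EINVAL;
}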
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
new file mode 100644
index 000000000000..2c667894f3a2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
@@ -0,0 +1,71 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXFW_MFA2_TLV_MULTI_H
+#define _MLXFW_MFA2_TLV_MULTI_H
+
+#include "mlxfw_mfa2_tlv.h"
+#include "mlxfw_mfa2_format.h"
+#include "mlxfw_mfa2_file.h"
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *tlv);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_advance(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv *from_tlv, u16 count);
+
+const struct mlxfw_mfa2_tlv *
+mlxfw_mfa2_tlv_multi_child_find(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type, u16 index);
+
+int mlxfw_mfa2_tlv_multi_child_count(const struct mlxfw_mfa2_file *mfa2_file,
+ const struct mlxfw_mfa2_tlv_multi *multi,
+ enum mlxfw_mfa2_tlv_type type,
+ u16 *p_count);
+
+#define mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, from_tlv, count) \
+ for (idx = 0, tlv = from_tlv; idx < (count); \
+ idx++, tlv = mlxfw_mfa2_tlv_next(mfa2_file, tlv))
+
+#define mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) \
+ mlxfw_mfa2_tlv_foreach(mfa2_file, tlv, idx, \
+ mlxfw_mfa2_tlv_multi_child(mfa2_file, multi), \
+ be16_to_cpu(multi->num_extensions) + 1)
+#endif
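
(Editorial aside: the foreach helpers above yield NULL for tlv when a child
fails validation, so loop bodies must check it — exactly as the .c file
does. An illustrative walk over all children of a multi TLV:)

/* Hypothetical example: log the type and length of each child extension. */
static void example_dump_children(const struct mlxfw_mfa2_file *mfa2_file,
                                  const struct mlxfw_mfa2_tlv_multi *multi)
{
        const struct mlxfw_mfa2_tlv *tlv;
        u16 idx;

        mlxfw_mfa2_tlv_multi_foreach(mfa2_file, tlv, idx, multi) {
                if (!tlv)
                        break;
                pr_debug("child %u: type 0x%02x len %u\n",
                         idx, tlv->type, be16_to_cpu(tlv->len));
        }
}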
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index ef23eaedc2ff..b9f80c2a8ae9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -75,6 +75,7 @@ config MLXSW_SPECTRUM
depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
depends on PSAMPLE || PSAMPLE=n
select PARMAN
+ select MLXFW
default m
---help---
This driver supports Mellanox Technologies Spectrum Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 2fb8c6585ac7..62fc42f396bb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -16,7 +16,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
spectrum_acl.o spectrum_flower.o \
- spectrum_cnt.o spectrum_dpipe.o
+ spectrum_cnt.o spectrum_dpipe.o \
+ spectrum_fid.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 7fb35395adf5..6e966af72fc4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -344,15 +344,17 @@ struct mlxsw_bus {
u8 features;
};
+struct mlxsw_fw_rev {
+ u16 major;
+ u16 minor;
+ u16 subminor;
+};
+
struct mlxsw_bus_info {
const char *device_kind;
const char *device_name;
struct device *dev;
- struct {
- u16 major;
- u16 minor;
- u16 subminor;
- } fw_rev;
+ struct mlxsw_fw_rev fw_rev;
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index c75e9141e3ec..9807ef814e42 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -56,6 +56,7 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
MLXSW_AFK_ELEMENT_PCP,
+ MLXSW_AFK_ELEMENT_TCP_FLAGS,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -102,6 +103,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 83b277c8090e..182150afd5ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5643,6 +5643,222 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MCQI - Management Component Query Information
+ * ---------------------------------------------
+ * This register allows querying information about firmware components.
+ */
+#define MLXSW_REG_MCQI_ID 0x9061
+#define MLXSW_REG_MCQI_BASE_LEN 0x18
+#define MLXSW_REG_MCQI_CAP_LEN 0x14
+#define MLXSW_REG_MCQI_LEN (MLXSW_REG_MCQI_BASE_LEN + MLXSW_REG_MCQI_CAP_LEN)
+
+MLXSW_REG_DEFINE(mcqi, MLXSW_REG_MCQI_ID, MLXSW_REG_MCQI_LEN);
+
+/* reg_mcqi_component_index
+ * Index of the accessed component.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcqi, component_index, 0x00, 0, 16);
+
+enum mlxsw_reg_mcqi_info_type {
+ MLXSW_REG_MCQI_INFO_TYPE_CAPABILITIES,
+};
+
+/* reg_mcqi_info_type
+ * Component properties set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, info_type, 0x08, 0, 5);
+
+/* reg_mcqi_offset
+ * The requested/returned data offset from the section start, given in bytes.
+ * Must be DWORD aligned.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, offset, 0x10, 0, 32);
+
+/* reg_mcqi_data_size
+ * The requested/returned data size, given in bytes. If data_size is not DWORD
+ * aligned, the last bytes are zero padded.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcqi, data_size, 0x14, 0, 16);
+
+/* reg_mcqi_cap_max_component_size
+ * Maximum size for this component, given in bytes.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_max_component_size, 0x20, 0, 32);
+
+/* reg_mcqi_cap_log_mcda_word_size
+ * Log 2 of the access word size in bytes. Read and write access must be aligned
+ * to the word size. Write access must be done for an integer number of words.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_log_mcda_word_size, 0x24, 28, 4);
+
+/* reg_mcqi_cap_mcda_max_write_size
+ * Maximum write size for the MCDA register.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcqi, cap_mcda_max_write_size, 0x24, 0, 16);
+
+static inline void mlxsw_reg_mcqi_pack(char *payload, u16 component_index)
+{
+ MLXSW_REG_ZERO(mcqi, payload);
+ mlxsw_reg_mcqi_component_index_set(payload, component_index);
+ mlxsw_reg_mcqi_info_type_set(payload,
+ MLXSW_REG_MCQI_INFO_TYPE_CAPABILITIES);
+ mlxsw_reg_mcqi_offset_set(payload, 0);
+ mlxsw_reg_mcqi_data_size_set(payload, MLXSW_REG_MCQI_CAP_LEN);
+}
+
+static inline void mlxsw_reg_mcqi_unpack(char *payload,
+ u32 *p_cap_max_component_size,
+ u8 *p_cap_log_mcda_word_size,
+ u16 *p_cap_mcda_max_write_size)
+{
+ *p_cap_max_component_size =
+ mlxsw_reg_mcqi_cap_max_component_size_get(payload);
+ *p_cap_log_mcda_word_size =
+ mlxsw_reg_mcqi_cap_log_mcda_word_size_get(payload);
+ *p_cap_mcda_max_write_size =
+ mlxsw_reg_mcqi_cap_mcda_max_write_size_get(payload);
+}
+
+/* MCC - Management Component Control
+ * ----------------------------------
+ * Controls the firmware component and updates the FSM.
+ */
+#define MLXSW_REG_MCC_ID 0x9062
+#define MLXSW_REG_MCC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mcc, MLXSW_REG_MCC_ID, MLXSW_REG_MCC_LEN);
+
+enum mlxsw_reg_mcc_instruction {
+ MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
+ MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
+ MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
+ MLXSW_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
+ MLXSW_REG_MCC_INSTRUCTION_CANCEL = 0x08,
+};
+
+/* reg_mcc_instruction
+ * Command to be executed by the FSM.
+ * Applicable for write operation only.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcc, instruction, 0x00, 0, 8);
+
+/* reg_mcc_component_index
+ * Index of the accessed component. Applicable only for commands that
+ * refer to components. Otherwise, this field is reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcc, component_index, 0x04, 0, 16);
+
+/* reg_mcc_update_handle
+ * Token representing the current flow executed by the FSM.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mcc, update_handle, 0x08, 0, 24);
+
+/* reg_mcc_error_code
+ * Indicates whether the instruction completed successfully or, if not, the
+ * reason it failed.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcc, error_code, 0x0C, 8, 8);
+
+/* reg_mcc_control_state
+ * Current FSM state
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcc, control_state, 0x0C, 0, 4);
+
+/* reg_mcc_component_size
+ * Component size in bytes. Valid for UPDATE_COMPONENT instruction. Specifying
+ * the size may shorten the update time. Value 0x0 means that size is
+ * unspecified.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mcc, component_size, 0x10, 0, 32);
+
+static inline void mlxsw_reg_mcc_pack(char *payload,
+ enum mlxsw_reg_mcc_instruction instr,
+ u16 component_index, u32 update_handle,
+ u32 component_size)
+{
+ MLXSW_REG_ZERO(mcc, payload);
+ mlxsw_reg_mcc_instruction_set(payload, instr);
+ mlxsw_reg_mcc_component_index_set(payload, component_index);
+ mlxsw_reg_mcc_update_handle_set(payload, update_handle);
+ mlxsw_reg_mcc_component_size_set(payload, component_size);
+}
+
+static inline void mlxsw_reg_mcc_unpack(char *payload, u32 *p_update_handle,
+ u8 *p_error_code, u8 *p_control_state)
+{
+ if (p_update_handle)
+ *p_update_handle = mlxsw_reg_mcc_update_handle_get(payload);
+ if (p_error_code)
+ *p_error_code = mlxsw_reg_mcc_error_code_get(payload);
+ if (p_control_state)
+ *p_control_state = mlxsw_reg_mcc_control_state_get(payload);
+}
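
(Editorial aside: the MCC register drives a firmware-update state machine.
The expected instruction sequence, with control_state polled between steps
via mlxsw_reg_mcc_unpack(), is sketched below; CANCEL can abort the flow at
any point:)

/*
 * LOCK_UPDATE_HANDLE -> UPDATE_COMPONENT -> <MCDA block writes>
 *   -> VERIFY_COMPONENT -> ACTIVATE -> RELEASE_UPDATE_HANDLE
 *
 * This matches the mlxfw_dev_ops callbacks the spectrum driver wires up
 * later in this patch (fsm_lock, fsm_component_update, ...).
 */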
+
+/* MCDA - Management Component Data Access
+ * ---------------------------------------
+ * This register allows reading and writing a firmware component.
+ */
+#define MLXSW_REG_MCDA_ID 0x9063
+#define MLXSW_REG_MCDA_BASE_LEN 0x10
+#define MLXSW_REG_MCDA_MAX_DATA_LEN 0x80
+#define MLXSW_REG_MCDA_LEN \
+ (MLXSW_REG_MCDA_BASE_LEN + MLXSW_REG_MCDA_MAX_DATA_LEN)
+
+MLXSW_REG_DEFINE(mcda, MLXSW_REG_MCDA_ID, MLXSW_REG_MCDA_LEN);
+
+/* reg_mcda_update_handle
+ * Token representing the current flow executed by the FSM.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, update_handle, 0x00, 0, 24);
+
+/* reg_mcda_offset
+ * Offset of the accessed address relative to component start. Accesses must
+ * be in accordance with log_mcda_word_size in the MCQI register.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, offset, 0x04, 0, 32);
+
+/* reg_mcda_size
+ * Size of the data accessed, given in bytes.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mcda, size, 0x08, 0, 16);
+
+/* reg_mcda_data
+ * Data block accessed.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, mcda, data, 0x10, 0, 32, 4, 0, false);
+
+static inline void mlxsw_reg_mcda_pack(char *payload, u32 update_handle,
+ u32 offset, u16 size, u8 *data)
+{
+ int i;
+
+ MLXSW_REG_ZERO(mcda, payload);
+ mlxsw_reg_mcda_update_handle_set(payload, update_handle);
+ mlxsw_reg_mcda_offset_set(payload, offset);
+ mlxsw_reg_mcda_size_set(payload, size);
+
+ for (i = 0; i < size / 4; i++)
+ mlxsw_reg_mcda_data_set(payload, i, *(u32 *) &data[i * 4]);
+}
+
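
(Editorial aside: an illustrative single-block MCDA write using the pack
helper above. The function name is hypothetical; note that size must be a
whole number of 32-bit words, which the spectrum code below guarantees by
forcing the component alignment to at least 4 bytes:)

static int example_mcda_write(struct mlxsw_core *core, u32 handle,
                              u32 offset, u8 *block)
{
        char mcda_pl[MLXSW_REG_MCDA_LEN];

        mlxsw_reg_mcda_pack(mcda_pl, handle, offset,
                            MLXSW_REG_MCDA_MAX_DATA_LEN, block);
        return mlxsw_reg_write(core, MLXSW_REG(mcda), mcda_pl);
}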
/* MPSC - Monitoring Packet Sampling Configuration Register
* --------------------------------------------------------
* MPSC Register is used to configure the Packet Sampling mechanism.
@@ -6221,6 +6437,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
+ MLXSW_REG(mcqi),
+ MLXSW_REG(mcc),
+ MLXSW_REG(mcda),
MLXSW_REG(mgpc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 88357cee7679..666bcf4854e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -68,6 +68,22 @@
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
+#include "../mlxfw/mlxfw.h"
+
+#define MLXSW_FWREV_MAJOR 13
+#define MLXSW_FWREV_MINOR 1420
+#define MLXSW_FWREV_SUBMINOR 122
+
+static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
+ .major = MLXSW_FWREV_MAJOR,
+ .minor = MLXSW_FWREV_MINOR,
+ .subminor = MLXSW_FWREV_SUBMINOR
+};
+
+#define MLXSW_SP_FW_FILENAME \
+ "mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
+ "." __stringify(MLXSW_FWREV_MINOR) \
+ "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
@@ -140,6 +156,216 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+struct mlxsw_sp_mlxfw_dev {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcqi_pl[MLXSW_REG_MCQI_LEN];
+ int err;
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
+ p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size,
+ MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index,
+ u32 component_size)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u8 *data, u16 size,
+ u32 offset)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcda_pl[MLXSW_REG_MCDA_LEN];
+
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
+}
+
+static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
+ fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ u8 error_code;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
+ fwhandle, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
+ fwhandle, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
+ .component_query = mlxsw_sp_component_query,
+ .fsm_lock = mlxsw_sp_fsm_lock,
+ .fsm_component_update = mlxsw_sp_fsm_component_update,
+ .fsm_block_download = mlxsw_sp_fsm_block_download,
+ .fsm_component_verify = mlxsw_sp_fsm_component_verify,
+ .fsm_activate = mlxsw_sp_fsm_activate,
+ .fsm_query_state = mlxsw_sp_fsm_query_state,
+ .fsm_cancel = mlxsw_sp_fsm_cancel,
+ .fsm_release = mlxsw_sp_fsm_release
+};
+
+static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
+ const struct mlxsw_fw_rev *b)
+{
+ if (a->major != b->major)
+ return a->major > b->major;
+ if (a->minor != b->minor)
+ return a->minor > b->minor;
+ return a->subminor >= b->subminor;
+}
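
(Editorial note: the comparison above is lexicographic, major field first.
As a worked example against the supported revision 13.1420.122 defined
earlier: a device running 13.1400.2000 compares lower — minor 1400 loses
before subminor is ever examined — and triggers the flash below, while
13.1420.200 compares greater-or-equal and is accepted as-is.)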
+
+static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
+ struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_sp_mlxfw_dev_ops,
+ .psid = mlxsw_sp->bus_info->psid,
+ .psid_size = strlen(mlxsw_sp->bus_info->psid),
+ },
+ .mlxsw_sp = mlxsw_sp
+ };
+ const struct firmware *firmware;
+ int err;
+
+ if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
+ return 0;
+
+ dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
+ rev->major, rev->minor, rev->subminor);
+ dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
+ MLXSW_SP_FW_FILENAME);
+
+ err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
+ mlxsw_sp->bus_info->dev);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
+ MLXSW_SP_FW_FILENAME);
+ return err;
+ }
+
+ err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+ release_firmware(firmware);
+ return err;
+}
+
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
@@ -210,6 +436,41 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ int err;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+ break;
+ case BR_STATE_LEARNING:
+ spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+ break;
+ case BR_STATE_LISTENING: /* fall-through */
+ case BR_STATE_DISABLED: /* fall-through */
+ case BR_STATE_BLOCKING:
+ spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+ break;
+ default:
+ BUG();
+ }
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
@@ -609,8 +870,7 @@ static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
swid);
}
-static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool enable)
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char svpe_pl[MLXSW_REG_SVPE_LEN];
@@ -619,21 +879,8 @@ static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
-int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
- u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char svfa_pl[MLXSW_REG_SVFA_LEN];
-
- mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
- fid, vid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
-}
-
-int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable)
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ bool learn_enable)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *spvmlr_pl;
@@ -642,18 +889,56 @@ int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
if (!spvmlr_pl)
return -ENOMEM;
- mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
- vid_end, learn_enable);
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+ learn_enable);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
kfree(spvmlr_pl);
return err;
}
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, bool learn_enable)
+static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
{
- return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- learn_enable);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool allow)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spaft_pl[MLXSW_REG_SPAFT_LEN];
+
+ mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
+}
+
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ int err;
+
+ if (!vid) {
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
+ if (err)
+ return err;
+ } else {
+ err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_allow_untagged_set;
+ }
+
+ mlxsw_sp_port->pvid = vid;
+ return 0;
+
+err_port_allow_untagged_set:
+ __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
+ return err;
}
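
(Editorial note: mlxsw_sp_port_pvid_set() treats VID 0 as "no PVID" and
simply blocks untagged ingress. For any other VID it first moves the
hardware PVID, then re-allows untagged frames; on failure it rolls the
hardware PVID back so it stays in sync with mlxsw_sp_port->pvid.)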
static int
@@ -1100,95 +1385,82 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
return 0;
}
-static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid, last_visited_vid;
- int err;
-
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
- vid);
- if (err) {
- last_visited_vid = vid;
- goto err_port_vid_to_fid_set;
- }
- }
-
- err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
- if (err) {
- last_visited_vid = VLAN_N_VID;
- goto err_port_vid_to_fid_set;
- }
-
- return 0;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
-err_port_vid_to_fid_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
- vid);
- return err;
+ list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
+ &mlxsw_sp_port->vlans_list, list)
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
-static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ bool untagged = vid == 1;
int err;
- err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
if (err)
- return err;
+ return ERR_PTR(err);
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
- vid, vid);
- if (err)
- return err;
+ mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
+ if (!mlxsw_sp_port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
}
- return 0;
+ mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+ mlxsw_sp_port_vlan->vid = vid;
+ list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
+
+ return mlxsw_sp_port_vlan;
+
+err_port_vlan_alloc:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ return ERR_PTR(err);
}
-static struct mlxsw_sp_port *
-mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+static void
+mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
- if (!mlxsw_sp_vport)
- return NULL;
+ list_del(&mlxsw_sp_port_vlan->list);
+ kfree(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+}
- /* dev will be set correctly after the VLAN device is linked
- * with the real device. In case of bridge SELF invocation, dev
- * will remain as is.
- */
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
- mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
- mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
- mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
- mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
- mlxsw_sp_vport->vport.vid = vid;
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan)
+ return mlxsw_sp_port_vlan;
- return mlxsw_sp_vport;
+ return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}
-static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
+void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- list_del(&mlxsw_sp_vport->vport.list);
- kfree(mlxsw_sp_vport);
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+
+ if (mlxsw_sp_port_vlan->bridge_port)
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ else if (fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_add_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
- bool untagged = vid == 1;
- int err;
/* VLAN 0 is added to HW filter when device goes up, but it is
* reserved in our case, so simply return.
@@ -1196,43 +1468,14 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
if (!vid)
return 0;
- if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
- return 0;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport)
- return -ENOMEM;
-
- /* When adding the first VLAN interface on a bridged port we need to
- * transition all the active 802.1Q bridge VLANs to use explicit
- * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
- */
- if (list_is_singular(&mlxsw_sp_port->vports_list)) {
- err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
- if (err)
- goto err_port_vp_mode_trans;
- }
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
- if (err)
- goto err_port_add_vid;
-
- return 0;
-
-err_port_add_vid:
- if (list_is_singular(&mlxsw_sp_port->vports_list))
- mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-err_port_vp_mode_trans:
- mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
- return err;
+ return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
@@ -1240,27 +1483,10 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
if (!vid)
return 0;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_port_vlan)
return 0;
-
- mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
-
- /* Drop FID reference. If this was the last reference the
- * resources will be freed.
- */
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f && !WARN_ON(!f->leave))
- f->leave(mlxsw_sp_vport);
-
- /* When removing the last VLAN interface on a bridged port we need to
- * transition all active 802.1Q bridge VLANs to use VID to FID
- * mappings and set port's mode to VLAN mode.
- */
- if (list_is_singular(&mlxsw_sp_port->vports_list))
- mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-
- mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return 0;
}
@@ -2398,24 +2624,12 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- mlxsw_sp_port->pvid = 1;
-
- return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
-}
-
-static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
-}
-
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
- size_t bytes;
int err;
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
@@ -2426,23 +2640,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->pvid = 1;
mlxsw_sp_port->split = split;
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
mlxsw_sp_port->link.autoneg = 1;
- bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
- mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
- if (!mlxsw_sp_port->active_vlans) {
- err = -ENOMEM;
- goto err_port_active_vlans_alloc;
- }
- mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
- if (!mlxsw_sp_port->untagged_vlans) {
- err = -ENOMEM;
- goto err_port_untagged_vlans_alloc;
- }
- INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
+ INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
@@ -2547,11 +2751,18 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_dcb_init;
}
- err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+ err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_fids_init;
+ }
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ if (IS_ERR(mlxsw_sp_port_vlan)) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
mlxsw_sp_port->local_port);
- goto err_port_pvid_vport_create;
+ goto err_port_vlan_get;
}
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
@@ -2572,8 +2783,10 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
-err_port_pvid_vport_create:
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+err_port_vlan_get:
+ mlxsw_sp_port_fids_fini(mlxsw_sp_port);
+err_port_fids_init:
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
@@ -2591,10 +2804,6 @@ err_alloc_hw_stats:
err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
- kfree(mlxsw_sp_port->untagged_vlans);
-err_port_untagged_vlans_alloc:
- kfree(mlxsw_sp_port->active_vlans);
-err_port_active_vlans_alloc:
free_netdev(dev);
return err;
}
@@ -2630,16 +2839,15 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
- kfree(mlxsw_sp_port->untagged_vlans);
- kfree(mlxsw_sp_port->active_vlans);
- WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
}
@@ -3192,57 +3400,6 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
}
}
-static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
- enum mlxsw_reg_sfgc_type type,
- enum mlxsw_reg_sfgc_bridge_type bridge_type)
-{
- enum mlxsw_flood_table_type table_type;
- enum mlxsw_sp_flood_table flood_table;
- char sfgc_pl[MLXSW_REG_SFGC_LEN];
-
- if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- else
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-
- switch (type) {
- case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
- flood_table = MLXSW_SP_FLOOD_TABLE_UC;
- break;
- case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
- flood_table = MLXSW_SP_FLOOD_TABLE_MC;
- break;
- default:
- flood_table = MLXSW_SP_FLOOD_TABLE_BC;
- }
-
- mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
- flood_table);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
-}
-
-static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
-{
- int type, err;
-
- for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
- if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
- continue;
-
- err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
- MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
- if (err)
- return err;
-
- err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
@@ -3290,18 +3447,6 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
-static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);
-
-static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
-{
- return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
-}
-
-static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
-{
- mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
-}
-
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -3310,9 +3455,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- INIT_LIST_HEAD(&mlxsw_sp->fids);
- INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
- INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
+
+ err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
+ return err;
+ }
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -3320,16 +3468,16 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_sp_traps_init(mlxsw_sp);
+ err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
return err;
}
- err = mlxsw_sp_flood_init(mlxsw_sp);
+ err = mlxsw_sp_traps_init(mlxsw_sp);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
- goto err_flood_init;
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
+ goto err_traps_init;
}
err = mlxsw_sp_buffers_init(mlxsw_sp);
@@ -3380,12 +3528,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_dpipe_init;
}
- err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
- goto err_dummy_fid_init;
- }
-
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3395,8 +3537,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
- mlxsw_sp_dummy_fid_fini(mlxsw_sp);
-err_dummy_fid_init:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3413,8 +3553,9 @@ err_switchdev_init:
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
-err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
+err_traps_init:
+ mlxsw_sp_fids_fini(mlxsw_sp);
return err;
}
@@ -3423,7 +3564,6 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
- mlxsw_sp_dummy_fid_fini(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
@@ -3433,8 +3573,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
- WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
- WARN_ON(!list_empty(&mlxsw_sp->fids));
+ mlxsw_sp_fids_fini(mlxsw_sp);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -3450,7 +3589,7 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_fid_offset_flood_tables = 3,
.fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_VFID_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -3510,7 +3649,7 @@ static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
return ret;
}
-static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -3562,176 +3701,6 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
-static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
- u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
- else
- return test_bit(fid, lag_port->active_vlans);
-}
-
-static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
- u16 lag_id = mlxsw_sp_port->lag_id;
- u64 max_lag_members;
- int i, count = 0;
-
- if (!mlxsw_sp_port->lagged)
- return true;
-
- max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_LAG_MEMBERS);
- for (i = 0; i < max_lag_members; i++) {
- struct mlxsw_sp_port *lag_port;
-
- lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
- if (!lag_port || lag_port->local_port == local_port)
- continue;
- if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
- count++;
- }
-
- return !count;
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
- mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
- mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
- mlxsw_sp_port->local_port);
-
- netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
- mlxsw_sp_port->local_port, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
- mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
- mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
-
- netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
- mlxsw_sp_port->lag_id, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
- return 0;
-
- if (mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
- fid);
- else
- return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
-}
-
-static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_fid *f, *tmp;
-
- list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
- if (--f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp, f);
- else
- WARN_ON_ONCE(1);
-}
-
-static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- return !mlxsw_sp->master_bridge.dev ||
- mlxsw_sp->master_bridge.dev == br_dev;
-}
-
-static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- mlxsw_sp->master_bridge.dev = br_dev;
- mlxsw_sp->master_bridge.ref_count++;
-}
-
-static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
-{
- if (--mlxsw_sp->master_bridge.ref_count == 0) {
- mlxsw_sp->master_bridge.dev = NULL;
- /* It's possible upper VLAN devices are still holding
- * references to underlying FIDs. Drop the reference
- * and release the resources if it was the last one.
- * If it wasn't, then something bad happened.
- */
- mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
- }
-}
-
-static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *br_dev)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- /* When port is not bridged untagged packets are tagged with
- * PVID=VID=1, thereby creating an implicit VLAN interface in
- * the device. Remove it and let bridge code take care of its
- * own VLANs.
- */
- err = mlxsw_sp_port_kill_vid(dev, 0, 1);
- if (err)
- return err;
-
- mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
-
- mlxsw_sp_port->learning = 1;
- mlxsw_sp_port->learning_sync = 1;
- mlxsw_sp_port->uc_flood = 1;
- mlxsw_sp_port->mc_flood = 1;
- mlxsw_sp_port->mc_router = 0;
- mlxsw_sp_port->mc_disabled = 1;
- mlxsw_sp_port->bridged = 1;
-
- return 0;
-}
-
-static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
-
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
-
- mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
-
- mlxsw_sp_port->learning = 0;
- mlxsw_sp_port->learning_sync = 0;
- mlxsw_sp_port->uc_flood = 0;
- mlxsw_sp_port->mc_flood = 0;
- mlxsw_sp_port->mc_router = 0;
- mlxsw_sp_port->bridged = 0;
-
- /* Add implicit VLAN interface in the device, so that untagged
- * packets will be classified to the default vFID.
- */
- mlxsw_sp_port_add_vid(dev, 0, 1);
-}
-
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
@@ -3850,51 +3819,11 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
return -EBUSY;
}
-static void
-mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *lag_dev, u16 lag_id)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- /* If vPort is assigned a RIF, then leave it since it's no
- * longer valid.
- */
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f)
- f->leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->lag_id = lag_id;
- mlxsw_sp_vport->lagged = 1;
- mlxsw_sp_vport->dev = lag_dev;
-}
-
-static void
-mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *f;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (f)
- f->leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
- mlxsw_sp_vport->lagged = 0;
-}
-
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_upper *lag;
u16 lag_id;
u8 port_index;
@@ -3927,7 +3856,10 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
- mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
+ /* Port is no longer usable as a router interface */
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
+ if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
return 0;
@@ -3954,10 +3886,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
- if (mlxsw_sp_port->bridged) {
- mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
- }
+ /* Any VLANs configured on the port are no longer valid */
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -3967,7 +3897,9 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
- mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
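
With the vPort code gone, LAG leave now relies on two generic helpers: mlxsw_sp_port_vlan_get() re-acquires a reference-counted VLAN-1 object and mlxsw_sp_port_pvid_set() restores the PVID so untagged frames keep ingressing. A minimal standalone sketch of the find-or-create get/put pattern these helpers follow (names and layout are illustrative, not the driver's):

    #include <stdlib.h>

    struct port_vlan {
            struct port_vlan *next;
            unsigned int ref_count;
            unsigned short vid;
    };

    static struct port_vlan *vlans;

    /* Find-or-create with a reference taken, like a _get() helper. */
    static struct port_vlan *port_vlan_get(unsigned short vid)
    {
            struct port_vlan *pv;

            for (pv = vlans; pv; pv = pv->next) {
                    if (pv->vid == vid) {
                            pv->ref_count++;
                            return pv;
                    }
            }
            pv = calloc(1, sizeof(*pv));
            if (!pv)
                    return NULL;
            pv->vid = vid;
            pv->ref_count = 1;
            pv->next = vlans;
            vlans = pv;
            return pv;
    }

    /* Drop a reference; the final put unlinks and frees the object. */
    static void port_vlan_put(struct port_vlan *pv)
    {
            struct port_vlan **prev;

            if (--pv->ref_count)
                    return;
            for (prev = &vlans; *prev != pv; prev = &(*prev)->next)
                    ;
            *prev = pv->next;
            free(pv);
    }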
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -4009,34 +3941,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
-static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return -EINVAL;
-
- mlxsw_sp_vport->dev = vlan_dev;
-
- return 0;
-}
-
-static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- u16 vid = vlan_dev_vlan_id(vlan_dev);
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
- return;
-
- mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
-}
-
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -4066,9 +3970,12 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
- err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
if (err)
return err;
+ err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_stp_set;
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
true, false);
if (err)
@@ -4077,6 +3984,8 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
err_port_vlan_set:
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+err_port_stp_set:
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
return err;
}
@@ -4085,9 +3994,11 @@ static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
false, false);
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
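
mlxsw_sp_port_ovs_join() now enables virtual-port mode before STP and VLAN membership, and both the error path and mlxsw_sp_port_ovs_leave() undo the steps in exact reverse order. A minimal sketch of this setup/teardown mirroring idiom (the step names are made up):

    #include <stdbool.h>

    static int step_a(bool on) { (void)on; return 0; }  /* e.g. VP mode */
    static int step_b(bool on) { (void)on; return 0; }  /* e.g. STP     */
    static int step_c(bool on) { (void)on; return 0; }  /* e.g. VLANs   */

    static int feature_join(void)
    {
            int err;

            err = step_a(true);
            if (err)
                    return err;
            err = step_b(true);
            if (err)
                    goto err_step_b;
            err = step_c(true);
            if (err)
                    goto err_step_c;
            return 0;

    err_step_c:
            step_b(false);
    err_step_b:
            step_a(false);
            return err;
    }

    /* leave() is the full error path: reverse order, unconditionally. */
    static void feature_leave(void)
    {
            step_c(false);
            step_b(false);
            step_a(false);
    }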
-static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
+static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
+ struct net_device *dev,
unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info;
@@ -4110,10 +4021,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
return -EINVAL;
if (!info->linking)
break;
- /* HW limitation forbids putting ports into multiple bridges. */
- if (netif_is_bridge_master(upper_dev) &&
- !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
- return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
@@ -4130,19 +4037,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (is_vlan_dev(upper_dev)) {
- if (info->linking)
- err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
- upper_dev);
- else
- mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
- } else if (netif_is_bridge_master(upper_dev)) {
+ if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ lower_dev,
upper_dev);
else
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ lower_dev,
+ upper_dev);
} else if (netif_is_lag_master(upper_dev)) {
if (info->linking)
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
@@ -4155,9 +4058,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
else
mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
- } else {
- err = -EINVAL;
- WARN_ON(1);
}
break;
}
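
NETDEV_PRECHANGEUPPER acts as the veto phase: nothing has changed yet, so returning an error rejects the new upper outright. NETDEV_CHANGEUPPER is the commit phase, where the core has already linked the devices and the driver mirrors the change into hardware; the old single-bridge veto and the catch-all WARN_ON are dropped because VLAN uppers are now legal here. A toy model of the two phases (hypothetical helpers):

    static int do_join(void)   { return 0; }
    static void do_leave(void) { }

    enum ev { PRE_CHANGE, CHANGE };

    static int handle_upper(enum ev e, int linking, int upper_supported)
    {
            switch (e) {
            case PRE_CHANGE:
                    /* Veto phase: nothing happened yet, an error here
                     * aborts the linking before any state changes.
                     */
                    if (linking && !upper_supported)
                            return -1;
                    return 0;
            case CHANGE:
                    /* Commit phase: the core already linked the devices,
                     * so mirror the new topology into the hardware.
                     */
                    if (linking)
                            return do_join();
                    do_leave();
                    return 0;
            }
            return 0;
    }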
@@ -4189,15 +4089,18 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
+static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
+ struct net_device *port_dev,
unsigned long event, void *ptr)
{
switch (event) {
case NETDEV_PRECHANGEUPPER:
case NETDEV_CHANGEUPPER:
- return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
+ return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
+ event, ptr);
case NETDEV_CHANGELOWERSTATE:
- return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
+ return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
+ ptr);
}
return 0;
@@ -4212,7 +4115,8 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
- ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
+ ptr);
if (ret)
return ret;
}
@@ -4221,322 +4125,33 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
return 0;
}
-static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
- struct net_device *vlan_dev)
-{
- u16 fid = vlan_dev_vlan_id(vlan_dev);
- struct mlxsw_sp_fid *f;
-
- f = mlxsw_sp_fid_find(mlxsw_sp, fid);
- if (!f) {
- f = mlxsw_sp_fid_create(mlxsw_sp, fid);
- if (IS_ERR(f))
- return PTR_ERR(f);
- }
-
- f->ref_count++;
-
- return 0;
-}
-
-static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
- struct net_device *vlan_dev)
-{
- u16 fid = vlan_dev_vlan_id(vlan_dev);
- struct mlxsw_sp_fid *f;
-
- f = mlxsw_sp_fid_find(mlxsw_sp, fid);
- if (f && f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
- if (f && --f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp, f);
-}
-
-static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
- unsigned long event, void *ptr)
-{
- struct netdev_notifier_changeupper_info *info;
- struct net_device *upper_dev;
- struct mlxsw_sp *mlxsw_sp;
- int err = 0;
-
- mlxsw_sp = mlxsw_sp_lower_get(br_dev);
- if (!mlxsw_sp)
- return 0;
-
- info = ptr;
-
- switch (event) {
- case NETDEV_PRECHANGEUPPER:
- upper_dev = info->upper_dev;
- if (!is_vlan_dev(upper_dev))
- return -EINVAL;
- if (is_vlan_dev(upper_dev) &&
- br_dev != mlxsw_sp->master_bridge.dev)
- return -EINVAL;
- break;
- case NETDEV_CHANGEUPPER:
- upper_dev = info->upper_dev;
- if (is_vlan_dev(upper_dev)) {
- if (info->linking)
- err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
- upper_dev);
- else
- mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
- upper_dev);
- } else {
- err = -EINVAL;
- WARN_ON(1);
- }
- break;
- }
-
- return err;
-}
-
-static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
-{
- return find_first_zero_bit(mlxsw_sp->vfids.mapped,
- MLXSW_SP_VFID_MAX);
-}
-
-static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_fid *f;
- u16 vfid, fid;
- int err;
-
- vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
- if (vfid == MLXSW_SP_VFID_MAX) {
- dev_err(dev, "No available vFIDs\n");
- return ERR_PTR(-ERANGE);
- }
-
- fid = mlxsw_sp_vfid_to_fid(vfid);
- err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
- if (err) {
- dev_err(dev, "Failed to create FID=%d\n", fid);
- return ERR_PTR(err);
- }
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- goto err_allocate_vfid;
-
- f->leave = mlxsw_sp_vport_vfid_leave;
- f->fid = fid;
- f->dev = br_dev;
-
- list_add(&f->list, &mlxsw_sp->vfids.list);
- set_bit(vfid, mlxsw_sp->vfids.mapped);
-
- return f;
-
-err_allocate_vfid:
- mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
- return ERR_PTR(-ENOMEM);
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fid *f)
-{
- u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
- u16 fid = f->fid;
-
- clear_bit(vfid, mlxsw_sp->vfids.mapped);
- list_del(&f->list);
-
- if (f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
-
- kfree(f);
-
- mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
-}
-
-static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
- vid);
-}
-
-static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f;
- int err;
-
- f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
- if (!f) {
- f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
- if (IS_ERR(f))
- return PTR_ERR(f);
- }
-
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
- if (err)
- goto err_vport_flood_set;
-
- err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
- if (err)
- goto err_vport_fid_map;
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
- f->ref_count++;
-
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
-
- return 0;
-
-err_vport_fid_map:
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
-err_vport_flood_set:
- if (!f->ref_count)
- mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
- return err;
-}
-
-static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
-
- mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
-
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
-
- mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
-
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
-}
-
-static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- struct net_device *dev = mlxsw_sp_vport->dev;
- int err;
-
- if (f && !WARN_ON(!f->leave))
- f->leave(mlxsw_sp_vport);
-
- err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
- if (err) {
- netdev_err(dev, "Failed to join vFID\n");
- return err;
- }
-
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- if (err) {
- netdev_err(dev, "Failed to enable learning\n");
- goto err_port_vid_learning_set;
- }
-
- mlxsw_sp_vport->learning = 1;
- mlxsw_sp_vport->learning_sync = 1;
- mlxsw_sp_vport->uc_flood = 1;
- mlxsw_sp_vport->mc_flood = 1;
- mlxsw_sp_vport->mc_router = 0;
- mlxsw_sp_vport->mc_disabled = 1;
- mlxsw_sp_vport->bridged = 1;
-
- return 0;
-
-err_port_vid_learning_set:
- mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
- return err;
-}
-
-static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-
- mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
-
- mlxsw_sp_vport->learning = 0;
- mlxsw_sp_vport->learning_sync = 0;
- mlxsw_sp_vport->uc_flood = 0;
- mlxsw_sp_vport->mc_flood = 0;
- mlxsw_sp_vport->mc_router = 0;
- mlxsw_sp_vport->bridged = 0;
-}
-
-static bool
-mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
- const struct net_device *br_dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
-
- if (dev && dev == br_dev)
- return false;
- }
-
- return true;
-}
-
-static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
- unsigned long event, void *ptr,
- u16 vid)
+static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
+ struct net_device *dev,
+ unsigned long event, void *ptr,
+ u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct netdev_notifier_changeupper_info *info = ptr;
- struct mlxsw_sp_port *mlxsw_sp_vport;
struct net_device *upper_dev;
int err = 0;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport)
- return 0;
-
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev))
return -EINVAL;
- if (!info->linking)
- break;
- /* We can't have multiple VLAN interfaces configured on
- * the same port and being members in the same bridge.
- */
- if (netif_is_bridge_master(upper_dev) &&
- !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
- upper_dev))
- return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
- err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
- upper_dev);
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ vlan_dev,
+ upper_dev);
else
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+ vlan_dev,
+ upper_dev);
} else {
err = -EINVAL;
WARN_ON(1);
@@ -4547,9 +4162,10 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
return err;
}
-static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
- unsigned long event, void *ptr,
- u16 vid)
+static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
+ struct net_device *lag_dev,
+ unsigned long event,
+ void *ptr, u16 vid)
{
struct net_device *dev;
struct list_head *iter;
@@ -4557,8 +4173,9 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
- ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
- vid);
+ ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
+ event, ptr,
+ vid);
if (ret)
return ret;
}
@@ -4574,11 +4191,12 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
- vid);
+ return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
+ event, ptr, vid);
else if (netif_is_lag_master(real_dev))
- return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
- vid);
+ return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
+ real_dev, event,
+ ptr, vid);
return 0;
}
@@ -4603,11 +4221,9 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
- err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
- else if (netif_is_bridge_master(dev))
- err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
else if (is_vlan_dev(dev))
err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
@@ -4680,3 +4296,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
+MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 0c23bc1e946d..1a834109bda1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -54,12 +54,7 @@
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
-#define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
-
-#define MLXSW_SP_DUMMY_FID 15359
-
-#define MLXSW_SP_RFID_BASE 15360
+#define MLXSW_SP_FID_8021D_MAX 1024
#define MLXSW_SP_MID_MAX 7000
@@ -78,13 +73,19 @@ struct mlxsw_sp_upper {
unsigned int ref_count;
};
-struct mlxsw_sp_fid {
- void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
- struct list_head list;
- unsigned int ref_count;
- struct net_device *dev;
- struct mlxsw_sp_rif *rif;
- u16 fid;
+enum mlxsw_sp_rif_type {
+ MLXSW_SP_RIF_TYPE_SUBPORT,
+ MLXSW_SP_RIF_TYPE_VLAN,
+ MLXSW_SP_RIF_TYPE_FID,
+ MLXSW_SP_RIF_TYPE_MAX,
+};
+
+enum mlxsw_sp_fid_type {
+ MLXSW_SP_FID_TYPE_8021Q,
+ MLXSW_SP_FID_TYPE_8021D,
+ MLXSW_SP_FID_TYPE_RFID,
+ MLXSW_SP_FID_TYPE_DUMMY,
+ MLXSW_SP_FID_TYPE_MAX,
};
struct mlxsw_sp_mid {
@@ -95,85 +96,6 @@ struct mlxsw_sp_mid {
unsigned int ref_count;
};
-static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
-{
- return MLXSW_SP_VFID_BASE + vfid;
-}
-
-static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
-{
- return fid - MLXSW_SP_VFID_BASE;
-}
-
-static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
-{
- return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
-}
-
-struct mlxsw_sp_sb_pr {
- enum mlxsw_reg_sbpr_mode mode;
- u32 size;
-};
-
-struct mlxsw_cp_sb_occ {
- u32 cur;
- u32 max;
-};
-
-struct mlxsw_sp_sb_cm {
- u32 min_buff;
- u32 max_buff;
- u8 pool;
- struct mlxsw_cp_sb_occ occ;
-};
-
-struct mlxsw_sp_sb_pm {
- u32 min_buff;
- u32 max_buff;
- struct mlxsw_cp_sb_occ occ;
-};
-
-#define MLXSW_SP_SB_POOL_COUNT 4
-#define MLXSW_SP_SB_TC_COUNT 8
-
-struct mlxsw_sp_sb_port {
- struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
- struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
-};
-
-struct mlxsw_sp_sb {
- struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
- struct mlxsw_sp_sb_port *ports;
- u32 cell_size;
-};
-
-#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
-
-struct mlxsw_sp_prefix_usage {
- DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
-};
-
-enum mlxsw_sp_l3proto {
- MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_SP_L3_PROTO_IPV6,
-};
-
-struct mlxsw_sp_lpm_tree {
- u8 id; /* tree ID */
- unsigned int ref_count;
- enum mlxsw_sp_l3proto proto;
- struct mlxsw_sp_prefix_usage prefix_usage;
-};
-
-struct mlxsw_sp_fib;
-
-struct mlxsw_sp_vr {
- u16 id; /* virtual router ID */
- u32 tb_id; /* kernel fib table id */
- unsigned int rif_count;
- struct mlxsw_sp_fib *fib4;
-};
-
enum mlxsw_sp_span_type {
MLXSW_SP_SPAN_EGRESS,
MLXSW_SP_SPAN_INGRESS
@@ -212,58 +134,25 @@ struct mlxsw_sp_port_mall_tc_entry {
};
};
-struct mlxsw_sp_router {
- struct mlxsw_sp_vr *vrs;
- struct rhashtable neigh_ht;
- struct rhashtable nexthop_group_ht;
- struct rhashtable nexthop_ht;
- struct {
- struct mlxsw_sp_lpm_tree *trees;
- unsigned int tree_count;
- } lpm;
- struct {
- struct delayed_work dw;
- unsigned long interval; /* ms */
- } neighs_update;
- struct delayed_work nexthop_probe_dw;
-#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
- struct list_head nexthop_neighs_list;
- bool aborted;
-};
-
+struct mlxsw_sp_sb;
+struct mlxsw_sp_bridge;
+struct mlxsw_sp_router;
struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
+struct mlxsw_sp_fid_core;
struct mlxsw_sp {
- struct {
- struct list_head list;
- DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
- } vfids;
- struct {
- struct list_head list;
- DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
- } br_mids;
- struct list_head fids; /* VLAN-aware bridge FIDs */
- struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
- struct {
- struct delayed_work dw;
-#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
- unsigned int interval; /* ms */
- } fdb_notify;
-#define MLXSW_SP_MIN_AGEING_TIME 10
-#define MLXSW_SP_MAX_AGEING_TIME 1000000
-#define MLXSW_SP_DEFAULT_AGEING_TIME 300
- u32 ageing_time;
- struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper *lags;
u8 *port_to_module;
- struct mlxsw_sp_sb sb;
- struct mlxsw_sp_router router;
+ struct mlxsw_sp_sb *sb;
+ struct mlxsw_sp_bridge *bridge;
+ struct mlxsw_sp_router *router;
struct mlxsw_sp_acl *acl;
+ struct mlxsw_sp_fid_core *fid_core;
struct {
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
} kvdl;
@@ -273,7 +162,6 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
- struct notifier_block fib_nb;
};
static inline struct mlxsw_sp_upper *
@@ -282,18 +170,6 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
return &mlxsw_sp->lags[lag_id];
}
-static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
- u32 cells)
-{
- return mlxsw_sp->sb.cell_size * cells;
-}
-
-static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
- u32 bytes)
-{
- return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
-}
-
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -310,29 +186,28 @@ struct mlxsw_sp_port_sample {
bool truncate;
};
+struct mlxsw_sp_bridge_port;
+struct mlxsw_sp_fid;
+
+struct mlxsw_sp_port_vlan {
+ struct list_head list;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct list_head bridge_vlan_node;
+};
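
Each mlxsw_sp_port_vlan sits on two lists at once: its owning port's vlans_list and, while bridged, a bridge port's VLAN list via bridge_vlan_node; fid is only set while the VLAN is mapped to a filtering identifier. An illustrative C model of that dual membership (not driver code):

    /* Illustrative model of the dual list membership. */
    struct list_node {
            struct list_node *prev, *next;
    };

    struct port_vlan {
            struct list_node port_node;   /* on the port's vlans_list  */
            struct list_node bridge_node; /* on a bridge port, if any  */
            void *fid;                    /* NULL until bridged/routed */
            unsigned short vid;
    };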
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
u8 local_port;
- u8 stp_state;
- u16 learning:1,
- learning_sync:1,
- uc_flood:1,
- mc_flood:1,
- mc_router:1,
- mc_disabled:1,
- bridged:1,
- lagged:1,
+ u8 lagged:1,
split:1;
u16 pvid;
u16 lag_id;
struct {
- struct list_head list;
- struct mlxsw_sp_fid *f;
- u16 vid;
- } vport;
- struct {
u8 tx_pause:1,
rx_pause:1,
autoneg:1;
@@ -347,11 +222,6 @@ struct mlxsw_sp_port {
u8 width;
u8 lane;
} mapping;
- /* 802.1Q bridge VLANs */
- unsigned long *active_vlans;
- unsigned long *untagged_vlans;
- /* VLAN interfaces */
- struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
struct {
@@ -360,10 +230,12 @@ struct mlxsw_sp_port {
struct delayed_work update_dw;
} hw_stats;
struct mlxsw_sp_port_sample *sample;
+ struct list_head vlans_list;
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -385,100 +257,25 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
-static inline u16
-mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- return mlxsw_sp_vport->vport.vid;
-}
-
-static inline bool
-mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
-{
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
-
- return vid != 0;
-}
-
-static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_fid *f)
-{
- mlxsw_sp_vport->vport.f = f;
-}
-
-static inline struct mlxsw_sp_fid *
-mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- return mlxsw_sp_vport->vport.f;
-}
-
-static inline struct net_device *
-mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
- return f ? f->dev : NULL;
-}
-
-static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
- return mlxsw_sp_vport;
- }
-
- return NULL;
-}
-
-static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
+static inline struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
{
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- if (f && f->fid == fid)
- return mlxsw_sp_vport;
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (mlxsw_sp_port_vlan->vid == vid)
+ return mlxsw_sp_port_vlan;
}
return NULL;
}
-static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
- u16 fid)
-{
- struct mlxsw_sp_fid *f;
-
- list_for_each_entry(f, &mlxsw_sp->fids, list)
- if (f->fid == fid)
- return f;
-
- return NULL;
-}
-
-static inline struct mlxsw_sp_fid *
-mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev)
-{
- struct mlxsw_sp_fid *f;
-
- list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
- if (f->dev == br_dev)
- return f;
-
- return NULL;
-}
-
-enum mlxsw_sp_flood_table {
- MLXSW_SP_FLOOD_TABLE_UC,
- MLXSW_SP_FLOOD_TABLE_BC,
- MLXSW_SP_FLOOD_TABLE_MC,
+enum mlxsw_sp_flood_type {
+ MLXSW_SP_FLOOD_TYPE_UC,
+ MLXSW_SP_FLOOD_TYPE_BC,
+ MLXSW_SP_FLOOD_TYPE_MC,
};
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
@@ -515,26 +312,26 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
- u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool set);
-void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
-int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding);
-struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
-void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev);
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev);
+
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -546,9 +343,15 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate);
-int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable);
+int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u8 state);
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
+int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ bool learn_enable);
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -574,10 +377,11 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct netdev_notifier_changeupper_info *info);
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index);
@@ -683,6 +487,8 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use);
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
+
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
@@ -702,4 +508,31 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ bool member);
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid);
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
+void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type);
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index);
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid);
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 317f7b14627f..1da889a044df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -53,6 +53,7 @@ struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_afa *afa;
+ struct mlxsw_sp_fid *dummy_fid;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
@@ -112,6 +113,11 @@ static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
.automatic_shrinking = true,
};
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp->acl->dummy_fid;
+}
+
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_profile_ops *ops)
@@ -676,6 +682,7 @@ static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
+ struct mlxsw_sp_fid *fid;
struct mlxsw_sp_acl *acl;
int err;
@@ -706,6 +713,13 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rhashtable_init;
+ fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto err_fid_get;
+ }
+ acl->dummy_fid = fid;
+
INIT_LIST_HEAD(&acl->rules);
err = acl_ops->init(mlxsw_sp, acl->priv);
if (err)
@@ -721,6 +735,8 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_acl_ops_init:
+ mlxsw_sp_fid_put(fid);
+err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
mlxsw_afa_destroy(acl->afa);
@@ -739,6 +755,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
acl_ops->fini(mlxsw_sp, acl->priv);
WARN_ON(!list_empty(&acl->rules));
+ mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
index af7b7bad48df..85d5001a5818 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
@@ -68,6 +68,11 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
+ MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */
+};
+
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12),
MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3),
@@ -102,6 +107,7 @@ static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+ MLXSW_AFK_BLOCK(0x32, mlxsw_sp_afk_element_info_ipv4),
MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 3a24289979d9..61a10f166f97 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -983,6 +983,7 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_L4_PORT,
MLXSW_AFK_ELEMENT_VID,
MLXSW_AFK_ELEMENT_PCP,
+ MLXSW_AFK_ELEMENT_TCP_FLAGS,
};
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 997189cfe7fd..93728c694e6d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -43,25 +43,72 @@
#include "port.h"
#include "reg.h"
+struct mlxsw_sp_sb_pr {
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+struct mlxsw_cp_sb_occ {
+ u32 cur;
+ u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+ u32 min_buff;
+ u32 max_buff;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT 4
+#define MLXSW_SP_SB_TC_COUNT 8
+
+struct mlxsw_sp_sb_port {
+ struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+};
+
+struct mlxsw_sp_sb {
+ struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+ struct mlxsw_sp_sb_port *ports;
+ u32 cell_size;
+};
+
+u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
+{
+ return mlxsw_sp->sb->cell_size * cells;
+}
+
+u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
+{
+ return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
+}
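
Shared-buffer quotas are programmed in device cells whose size is read from firmware at init; the bytes-to-cells direction rounds up so a configured byte budget is never undercut. A self-contained example of the arithmetic, with 96 bytes as a made-up cell size:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int cell_size = 96;    /* example value only */
            unsigned int bytes = 10000;
            unsigned int cells = DIV_ROUND_UP(bytes, cell_size);

            /* 10000 bytes -> 105 cells -> 10080 bytes actually set */
            printf("%u cells = %u bytes\n", cells, cells * cell_size);
            return 0;
    }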
+
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
u8 pool,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.prs[dir][pool];
+ return &mlxsw_sp->sb->prs[dir][pool];
}
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+ return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff];
}
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 pool,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+ return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
}
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
@@ -215,16 +262,17 @@ static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
- GFP_KERNEL);
- if (!mlxsw_sp->sb.ports)
+ mlxsw_sp->sb->ports = kcalloc(max_ports,
+ sizeof(struct mlxsw_sp_sb_port),
+ GFP_KERNEL);
+ if (!mlxsw_sp->sb->ports)
return -ENOMEM;
return 0;
}
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
- kfree(mlxsw_sp->sb.ports);
+ kfree(mlxsw_sp->sb->ports);
}
#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000
@@ -551,15 +599,19 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
return -EIO;
- mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
return -EIO;
sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
+ mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
+ if (!mlxsw_sp->sb)
+ return -ENOMEM;
+ mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+
err = mlxsw_sp_sb_ports_init(mlxsw_sp);
if (err)
- return err;
+ goto err_sb_ports_init;
err = mlxsw_sp_sb_prs_init(mlxsw_sp);
if (err)
goto err_sb_prs_init;
@@ -584,6 +636,8 @@ err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
mlxsw_sp_sb_ports_fini(mlxsw_sp);
+err_sb_ports_init:
+ kfree(mlxsw_sp->sb);
return err;
}
@@ -591,6 +645,7 @@ void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
mlxsw_sp_sb_ports_fini(mlxsw_sp);
+ kfree(mlxsw_sp->sb);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
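
struct mlxsw_sp_sb is now private to spectrum_buffers.c and reached through a pointer, so init allocates it up front and both the new err_sb_ports_init label and mlxsw_sp_buffers_fini() free it. A minimal sketch of this opaque-handle pattern (illustrative names):

    #include <stdlib.h>

    struct sb;                            /* header: forward declaration */
    struct ctx { struct sb *sb; };

    struct sb { unsigned int cell_size; };       /* .c file: definition  */

    static int ctx_init(struct ctx *c)
    {
            c->sb = calloc(1, sizeof(*c->sb));
            return c->sb ? 0 : -1;
    }

    static void ctx_fini(struct ctx *c)
    {
            free(c->sb);                 /* mirrored on every error path */
    }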
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 5f0a7bc692a4..af2c65a3fd9f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -218,7 +218,7 @@ static int
mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
struct devlink_dpipe_dump_ctx *dump_ctx)
{
- struct devlink_dpipe_value match_value = {{0}}, action_value = {{0}};
+ struct devlink_dpipe_value match_value, action_value;
struct devlink_dpipe_action action = {0};
struct devlink_dpipe_match match = {0};
struct devlink_dpipe_entry entry = {0};
@@ -227,6 +227,9 @@ mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled,
int i, j;
int err;
+ memset(&match_value, 0, sizeof(match_value));
+ memset(&action_value, 0, sizeof(action_value));
+
mlxsw_sp_erif_match_action_prepare(&match, &action);
err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match,
&action_value, &action);
@@ -242,10 +245,11 @@ start_again:
return err;
j = 0;
for (; i < rif_count; i++) {
- if (!mlxsw_sp->rifs[i])
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+ if (!rif)
continue;
- err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry,
- mlxsw_sp->rifs[i],
+ err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif,
counters_enabled);
if (err)
goto err_entry_get;
@@ -282,15 +286,15 @@ static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable)
rtnl_lock();
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
- if (!mlxsw_sp->rifs[i])
+ struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
+
+ if (!rif)
continue;
if (enable)
- mlxsw_sp_rif_counter_alloc(mlxsw_sp,
- mlxsw_sp->rifs[i],
+ mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
else
- mlxsw_sp_rif_counter_free(mlxsw_sp,
- mlxsw_sp->rifs[i],
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
}
rtnl_unlock();
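
The {{0}} initializers for the two devlink_dpipe_value variables are replaced with explicit memset() calls, likely to silence older compilers that warn about braced initialization of a nested union and to guarantee that padding bytes are zeroed as well. A small illustration of the preferred form (the struct layout here is invented):

    #include <string.h>

    struct value {
            union { const void *ptr; unsigned long long num; } u;
            int mapping;
    };

    void zero_values(struct value *a, struct value *b)
    {
            memset(a, 0, sizeof(*a));    /* every byte, padding included */
            memset(b, 0, sizeof(*b));
    }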
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
new file mode 100644
index 000000000000..c7590aea1aee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -0,0 +1,992 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_fid_family;
+
+struct mlxsw_sp_fid_core {
+ struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
+ unsigned int *port_fid_mappings;
+};
+
+struct mlxsw_sp_fid {
+ struct list_head list;
+ struct mlxsw_sp_rif *rif;
+ unsigned int ref_count;
+ u16 fid_index;
+ struct mlxsw_sp_fid_family *fid_family;
+};
+
+struct mlxsw_sp_fid_8021q {
+ struct mlxsw_sp_fid common;
+ u16 vid;
+};
+
+struct mlxsw_sp_fid_8021d {
+ struct mlxsw_sp_fid common;
+ int br_ifindex;
+};
+
+struct mlxsw_sp_flood_table {
+ enum mlxsw_sp_flood_type packet_type;
+ enum mlxsw_reg_sfgc_bridge_type bridge_type;
+ enum mlxsw_flood_table_type table_type;
+ int table_index;
+};
+
+struct mlxsw_sp_fid_ops {
+ void (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
+ int (*configure)(struct mlxsw_sp_fid *fid);
+ void (*deconfigure)(struct mlxsw_sp_fid *fid);
+ int (*index_alloc)(struct mlxsw_sp_fid *fid, const void *arg,
+ u16 *p_fid_index);
+ bool (*compare)(const struct mlxsw_sp_fid *fid,
+ const void *arg);
+ u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
+ int (*port_vid_map)(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *port, u16 vid);
+ void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *port, u16 vid);
+};
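
Every FID family registers one of these ops tables, so the common code in this file can configure, map and flood FIDs without knowing whether it is dealing with an 802.1Q, 802.1D, rFID or dummy family. A stripped-down sketch of the dispatch pattern:

    struct fid;

    struct fid_ops {
            int (*configure)(struct fid *fid);
            void (*deconfigure)(struct fid *fid);
    };

    struct fid {
            const struct fid_ops *ops;   /* set by the owning family */
            unsigned short index;
    };

    /* Common code dispatches blindly through the family's ops. */
    static int fid_configure(struct fid *fid)
    {
            return fid->ops->configure(fid);
    }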
+
+struct mlxsw_sp_fid_family {
+ enum mlxsw_sp_fid_type type;
+ size_t fid_size;
+ u16 start_index;
+ u16 end_index;
+ struct list_head fids_list;
+ unsigned long *fids_bitmap;
+ const struct mlxsw_sp_flood_table *flood_tables;
+ int nr_flood_tables;
+ enum mlxsw_sp_rif_type rif_type;
+ const struct mlxsw_sp_fid_ops *ops;
+ struct mlxsw_sp *mlxsw_sp;
+};
+
+static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST] = 1,
+};
+
+static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+};
+
+static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
+};
+
+static const int *mlxsw_sp_packet_type_sfgc_types[] = {
+ [MLXSW_SP_FLOOD_TYPE_UC] = mlxsw_sp_sfgc_uc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_BC] = mlxsw_sp_sfgc_bc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
+};
+
+static const struct mlxsw_sp_flood_table *
+mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ if (fid_family->flood_tables[i].packet_type != packet_type)
+ continue;
+ return &fid_family->flood_tables[i];
+ }
+
+ return NULL;
+}
+
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ bool member)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ const struct mlxsw_sp_flood_table *flood_table;
+ char *sftr_pl;
+ int err;
+
+ if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
+ return -EINVAL;
+
+ flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
+ if (!flood_table)
+ return -ESRCH;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sftr_pack(sftr_pl, flood_table->table_index,
+ ops->flood_index(fid), flood_table->table_type, 1,
+ local_port, member);
+ err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr),
+ sftr_pl);
+ kfree(sftr_pl);
+ return err;
+}
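
The helper first resolves the packet type to one of the family's flood tables, then writes a single SFTR record; the payload is heap-allocated because MLXSW_REG_SFTR_LEN is too large for comfortable on-stack use. A hypothetical call site, using the signature introduced above (error handling abbreviated):

    /* Hypothetical caller: include local port 8 in the FID's
     * broadcast flood domain.
     */
    err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, 8, true);
    if (err)
            return err;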
+
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ if (WARN_ON(!fid->fid_family->ops->port_vid_map))
+ return -EINVAL;
+ return fid->fid_family->ops->port_vid_map(fid, mlxsw_sp_port, vid);
+}
+
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ fid->fid_family->ops->port_vid_unmap(fid, mlxsw_sp_port, vid);
+}
+
+enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->rif_type;
+}
+
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index;
+}
+
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->type;
+}
+
+void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ fid->rif = rif;
+}
+
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type)
+{
+ struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
+
+ return fid_core->fid_family_arr[type]->rif_type;
+}
+
+static struct mlxsw_sp_fid_8021q *
+mlxsw_sp_fid_8021q_fid(const struct mlxsw_sp_fid *fid)
+{
+ return container_of(fid, struct mlxsw_sp_fid_8021q, common);
+}
+
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_8021q_fid(fid)->vid;
+}
+
+static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+}
+
+static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
+{
+ return valid ? MLXSW_REG_SFMR_OP_CREATE_FID :
+ MLXSW_REG_SFMR_OP_DESTROY_FID;
+}
+
+static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u16 fid_offset, bool valid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
+ fid_offset);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u16 vid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid_index, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ u8 local_port, u16 vid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, local_port, mt, valid, fid_index, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_8021q *fid_8021q;
+ int err;
+
+ err = mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, fid->fid_index, true);
+ if (err)
+ return err;
+
+ fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ err = mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid,
+ true);
+ if (err)
+ goto err_fid_map;
+
+ return 0;
+
+err_fid_map:
+ mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_8021q *fid_8021q;
+
+ fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid, false);
+ mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_8021q_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 vid = *(u16 *) arg;
+
+ /* Use a 1:1 mapping for simplicity, although it is not required */
+ if (vid < fid_family->start_index || vid > fid_family->end_index)
+ return -EINVAL;
+ *p_fid_index = vid;
+
+ return 0;
+}
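
802.1Q FIDs use the identity mapping FID index == VID, so allocation reduces to a range check against 1..4095 (VLAN_VID_MASK). The same check as a standalone function:

    /* Identity mapping: FID index == VID, valid for VIDs 1..4095. */
    static int vid_to_fid_index(unsigned short vid, unsigned short *fid_index)
    {
            if (vid < 1 || vid > 4095)
                    return -1;
            *fid_index = vid;
            return 0;
    }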
+
+static bool
+mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ return mlxsw_sp_fid_8021q_fid(fid)->vid == vid;
+}
+
+static u16 mlxsw_sp_fid_8021q_flood_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index;
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
+ return 0;
+ return __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port,
+ vid, true);
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
+ return;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port, vid,
+ false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021q_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .flood_index = mlxsw_sp_fid_8021q_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021q_flood_tables[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST,
+ .table_index = 1,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST,
+ .table_index = 2,
+ },
+};
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = 1,
+ .end_index = VLAN_VID_MASK,
+ .flood_tables = mlxsw_sp_fid_8021q_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021q_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+};
+
+static struct mlxsw_sp_fid_8021d *
+mlxsw_sp_fid_8021d_fid(const struct mlxsw_sp_fid *fid)
+{
+ return container_of(fid, struct mlxsw_sp_fid_8021d, common);
+}
+
+static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ int br_ifindex = *(int *) arg;
+
+ mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+}
+
+static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ return mlxsw_sp_fid_op(fid_family->mlxsw_sp, fid->fid_index, 0, true);
+}
+
+static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 nr_fids, fid_index;
+
+ nr_fids = fid_family->end_index - fid_family->start_index + 1;
+ fid_index = find_first_zero_bit(fid_family->fids_bitmap, nr_fids);
+ if (fid_index == nr_fids)
+ return -ENOBUFS;
+ *p_fid_index = fid_family->start_index + fid_index;
+
+ return 0;
+}
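
802.1D FIDs are carved out of a bitmap sitting just above the 4K VLAN range; allocation is a first-fit scan for a clear bit, failing with -ENOBUFS when the family is exhausted. A self-contained model of such a bitmap allocator (this sketch also sets the bit, which the driver does elsewhere):

    #include <limits.h>

    #define NR_FIDS 1024
    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long fids_bitmap[NR_FIDS / BITS_PER_WORD];

    /* First-fit scan for a clear bit; -1 means the range is exhausted. */
    static int fid_bit_alloc(void)
    {
            unsigned int i;

            for (i = 0; i < NR_FIDS; i++) {
                    unsigned long *w = &fids_bitmap[i / BITS_PER_WORD];
                    unsigned long m = 1UL << (i % BITS_PER_WORD);

                    if (!(*w & m)) {
                            *w |= m;
                            return (int)i;
                    }
            }
            return -1;
    }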
+
+static bool
+mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ int br_ifindex = *(int *) arg;
+
+ return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
+}
+
+static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_index - fid->fid_family->start_index;
+}
+
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port,
+ vid, true);
+ if (err)
+ goto err_fid_port_vid_map;
+ }
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_vp_mode_set;
+
+ return 0;
+
+err_port_vp_mode_set:
+err_fid_port_vid_map:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &mlxsw_sp_port->vlans_list, list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid,
+ false);
+ }
+ return err;
+}
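
On failure, list_for_each_entry_continue_reverse() walks back over only the entries already handled and unmaps them in reverse order, leaving the port exactly as it was found. The same partial-rollback shape over an indexed collection:

    /* Apply a step to n items; on failure undo, in reverse, only the
     * items already handled.
     */
    static int apply_all(int (*apply)(int), void (*revert)(int), int n)
    {
            int i, err = 0;

            for (i = 0; i < n; i++) {
                    err = apply(i);
                    if (err)
                            goto rollback;
            }
            return 0;

    rollback:
            while (--i >= 0)
                    revert(i);
            return err;
    }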
+
+static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+
+ list_for_each_entry_reverse(mlxsw_sp_port_vlan,
+ &mlxsw_sp_port->vlans_list, list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+
+ if (!fid)
+ continue;
+
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid,
+ false);
+ }
+}
+
+static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, true);
+ if (err)
+ return err;
+
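+ /* First FID mapping on this port: transition it from VLAN mode to
+ * virtual port (VP) mode.
+ */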
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err)
+ goto err_port_vp_mode_trans;
+ }
+
+ return 0;
+
+err_port_vp_mode_trans:
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
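+ /* Last FID mapping on this port is going away: transition it back to
+ * VLAN mode first.
+ */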
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ mlxsw_sp_port->local_port, vid, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
+ .setup = mlxsw_sp_fid_8021d_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021d_compare,
+ .flood_index = mlxsw_sp_fid_8021d_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 1,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
+ .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_index = 2,
+ },
+};
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = VLAN_N_VID,
+ .end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+};
+
+static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
+{
+ /* rFIDs are allocated by the device during init */
+ return 0;
+}
+
+static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
+{
+}
+
+static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ u16 rif_index = *(u16 *) arg;
+
+ *p_fid_index = fid->fid_family->start_index + rif_index;
+
+ return 0;
+}
+
+static bool mlxsw_sp_fid_rfid_compare(const struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ u16 rif_index = *(u16 *) arg;
+
+ return fid->fid_index == rif_index + fid->fid_family->start_index;
+}
+
+static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ /* We only need to transition the port to virtual mode since
+ * {Port, VID} => FID is done by the firmware upon RIF creation.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err)
+ goto err_port_vp_mode_trans;
+ }
+
+ return 0;
+
+err_port_vp_mode_trans:
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ return err;
+}
+
+static void
+mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .configure = mlxsw_sp_fid_rfid_configure,
+ .deconfigure = mlxsw_sp_fid_rfid_deconfigure,
+ .index_alloc = mlxsw_sp_fid_rfid_index_alloc,
+ .compare = mlxsw_sp_fid_rfid_compare,
+ .port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+};
+
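+/* rFIDs occupy a fixed range above the bridge FIDs, one per RIF index */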
+#define MLXSW_SP_RFID_BASE (15 * 1024)
+#define MLXSW_SP_RFID_MAX 1024
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_BASE,
+ .end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+};
+
+static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+
+ return mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, true);
+}
+
+static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+}
+
+static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
+ const void *arg, u16 *p_fid_index)
+{
+ *p_fid_index = fid->fid_family->start_index;
+
+ return 0;
+}
+
+static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ return true;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .configure = mlxsw_sp_fid_dummy_configure,
+ .deconfigure = mlxsw_sp_fid_dummy_deconfigure,
+ .index_alloc = mlxsw_sp_fid_dummy_index_alloc,
+ .compare = mlxsw_sp_fid_dummy_compare,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_BASE - 1,
+ .end_index = MLXSW_SP_RFID_BASE - 1,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+};
+
+static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
+};
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
+{
+ struct mlxsw_sp_fid_family *fid_family;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ int err;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
+ list_for_each_entry(fid, &fid_family->fids_list, list) {
+ if (!fid->fid_family->ops->compare(fid, arg))
+ continue;
+ fid->ref_count++;
+ return fid;
+ }
+
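+ /* No existing FID matched; allocate and configure a new one */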
+ fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
+ if (!fid)
+ return ERR_PTR(-ENOMEM);
+ fid->fid_family = fid_family;
+
+ err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
+ if (err)
+ goto err_index_alloc;
+ fid->fid_index = fid_index;
+ __set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
+
+ if (fid->fid_family->ops->setup)
+ fid->fid_family->ops->setup(fid, arg);
+
+ err = fid->fid_family->ops->configure(fid);
+ if (err)
+ goto err_configure;
+
+ list_add(&fid->list, &fid_family->fids_list);
+ fid->ref_count++;
+ return fid;
+
+err_configure:
+ __clear_bit(fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+err_index_alloc:
+ kfree(fid);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ if (--fid->ref_count == 1 && fid->rif) {
+ /* Destroy the associated RIF and let it drop the last
+ * reference on the FID.
+ */
+ return mlxsw_sp_rif_destroy(fid->rif);
+ } else if (fid->ref_count == 0) {
+ list_del(&fid->list);
+ fid->fid_family->ops->deconfigure(fid);
+ __clear_bit(fid->fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+ kfree(fid);
+ }
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021Q, &vid);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_RFID, &rif_index);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_DUMMY, NULL);
+}
+
+static int
+mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ const int *sfgc_packet_types;
+ int i;
+
+ sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
+ for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+ int err;
+
+ if (!sfgc_packet_types[i])
+ continue;
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, flood_table->bridge_type,
+ flood_table->table_type,
+ flood_table->table_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+ int err;
+
+ flood_table = &fid_family->flood_tables[i];
+ err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid_family *tmpl)
+{
+ u16 nr_fids = tmpl->end_index - tmpl->start_index + 1;
+ struct mlxsw_sp_fid_family *fid_family;
+ int err;
+
+ fid_family = kmemdup(tmpl, sizeof(*fid_family), GFP_KERNEL);
+ if (!fid_family)
+ return -ENOMEM;
+
+ fid_family->mlxsw_sp = mlxsw_sp;
+ INIT_LIST_HEAD(&fid_family->fids_list);
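+ /* One bit per FID in the family's index range */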
+ fid_family->fids_bitmap = kcalloc(BITS_TO_LONGS(nr_fids),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!fid_family->fids_bitmap) {
+ err = -ENOMEM;
+ goto err_alloc_fids_bitmap;
+ }
+
+ if (fid_family->flood_tables) {
+ err = mlxsw_sp_fid_flood_tables_init(fid_family);
+ if (err)
+ goto err_fid_flood_tables_init;
+ }
+
+ mlxsw_sp->fid_core->fid_family_arr[tmpl->type] = fid_family;
+
+ return 0;
+
+err_fid_flood_tables_init:
+ kfree(fid_family->fids_bitmap);
+err_alloc_fids_bitmap:
+ kfree(fid_family);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid_family *fid_family)
+{
+ mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+ kfree(fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
+ kfree(fid_family);
+}
+
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ /* Track number of FIDs configured on the port with mapping type
+ * PORT_VID_TO_FID, so that we know when to transition the port
+ * back to non-virtual (VLAN) mode.
+ */
+ mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
+
+ return mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+}
+
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
+}
+
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ struct mlxsw_sp_fid_core *fid_core;
+ int err, i;
+
+ fid_core = kzalloc(sizeof(*mlxsw_sp->fid_core), GFP_KERNEL);
+ if (!fid_core)
+ return -ENOMEM;
+ mlxsw_sp->fid_core = fid_core;
+
+ fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!fid_core->port_fid_mappings) {
+ err = -ENOMEM;
+ goto err_alloc_port_fid_mappings;
+ }
+
+ for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
+ err = mlxsw_sp_fid_family_register(mlxsw_sp,
+ mlxsw_sp_fid_family_arr[i]);
+
+ if (err)
+ goto err_fid_ops_register;
+ }
+
+ return 0;
+
+err_fid_ops_register:
+ for (i--; i >= 0; i--) {
+ struct mlxsw_sp_fid_family *fid_family;
+
+ fid_family = fid_core->fid_family_arr[i];
+ mlxsw_sp_fid_family_unregister(mlxsw_sp, fid_family);
+ }
+ kfree(fid_core->port_fid_mappings);
+err_alloc_port_fid_mappings:
+ kfree(fid_core);
+ return err;
+}
+
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++)
+ mlxsw_sp_fid_family_unregister(mlxsw_sp,
+ fid_core->fid_family_arr[i]);
+ kfree(fid_core->port_fid_mappings);
+ kfree(fid_core);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 7d87e23578a3..ed75c6a85bc3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -70,9 +70,13 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
} else if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
+ fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
- MLXSW_SP_DUMMY_FID);
+ fid_index);
if (err)
return err;
@@ -178,6 +182,32 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct tc_cls_flower_offload *f,
+ u8 ip_proto)
+{
+ struct flow_dissector_key_tcp *key, *mask;
+
+ if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP) {
+ dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
+ return -EINVAL;
+ }
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->key);
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->mask);
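+ /* TCP flags are carried network-endian in the dissector key */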
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
+ ntohs(key->flags), ntohs(mask->flags));
+ return 0;
+}
+
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
struct mlxsw_sp_acl_rule_info *rulei,
@@ -194,6 +224,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
return -EOPNOTSUPP;
@@ -285,6 +316,9 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
if (err)
return err;
+ err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
+ if (err)
+ return err;
return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 9f89c4137d21..a4272c351e3a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -42,6 +42,7 @@
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -56,21 +57,82 @@
#include "spectrum_dpipe.h"
#include "spectrum_router.h"
+struct mlxsw_sp_vr;
+struct mlxsw_sp_lpm_tree;
+struct mlxsw_sp_rif_ops;
+
+struct mlxsw_sp_router {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif **rifs;
+ struct mlxsw_sp_vr *vrs;
+ struct rhashtable neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable nexthop_ht;
+ struct {
+ struct mlxsw_sp_lpm_tree *trees;
+ unsigned int tree_count;
+ } lpm;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_neighs_list;
+ bool aborted;
+ struct notifier_block fib_nb;
+ const struct mlxsw_sp_rif_ops **rif_ops_arr;
+};
+
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
struct net_device *dev;
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_fid *fid;
unsigned char addr[ETH_ALEN];
int mtu;
u16 rif_index;
u16 vr_id;
+ const struct mlxsw_sp_rif_ops *ops;
+ struct mlxsw_sp *mlxsw_sp;
+
unsigned int counter_ingress;
bool counter_ingress_valid;
unsigned int counter_egress;
bool counter_egress_valid;
};
+struct mlxsw_sp_rif_params {
+ struct net_device *dev;
+ union {
+ u16 system_port;
+ u16 lag_id;
+ };
+ u16 vid;
+ bool lag;
+};
+
+struct mlxsw_sp_rif_subport {
+ struct mlxsw_sp_rif common;
+ union {
+ u16 system_port;
+ u16 lag_id;
+ };
+ u16 vid;
+ bool lag;
+};
+
+struct mlxsw_sp_rif_ops {
+ enum mlxsw_sp_rif_type type;
+ size_t rif_size;
+
+ void (*setup)(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params);
+ int (*configure)(struct mlxsw_sp_rif *rif);
+ void (*deconfigure)(struct mlxsw_sp_rif *rif);
+ struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
+};
+
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir)
@@ -219,10 +281,35 @@ void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
+static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ if (!devlink_dpipe_table_counter_enabled(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
+ return;
+ mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+}
+
+static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
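+/* One bit per possible prefix length, sized for IPv6 (128 bits) */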
+#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
+
+struct mlxsw_sp_prefix_usage {
+ DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
+};
+
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
@@ -287,6 +374,7 @@ enum mlxsw_sp_fib_entry_type {
};
struct mlxsw_sp_nexthop_group;
+struct mlxsw_sp_fib;
struct mlxsw_sp_fib_node {
struct list_head entry_list;
@@ -313,6 +401,18 @@ struct mlxsw_sp_fib_entry {
bool offloaded;
};
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+};
+
+struct mlxsw_sp_lpm_tree {
+ u8 id; /* tree ID */
+ unsigned int ref_count;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
struct mlxsw_sp_fib {
struct rhashtable ht;
struct list_head node_list;
@@ -323,6 +423,13 @@ struct mlxsw_sp_fib {
enum mlxsw_sp_l3proto proto;
};
+struct mlxsw_sp_vr {
+ u16 id; /* virtual router ID */
+ u32 tb_id; /* kernel fib table id */
+ unsigned int rif_count;
+ struct mlxsw_sp_fib *fib4;
+};
+
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
@@ -361,8 +468,8 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
if (lpm_tree->ref_count == 0)
return lpm_tree;
}
@@ -458,8 +565,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
if (lpm_tree->ref_count != 0 &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
@@ -496,15 +603,15 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
return -EIO;
max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
- mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
- mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count,
+ mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
+ mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
sizeof(struct mlxsw_sp_lpm_tree),
GFP_KERNEL);
- if (!mlxsw_sp->router.lpm.trees)
+ if (!mlxsw_sp->router->lpm.trees)
return -ENOMEM;
- for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
- lpm_tree = &mlxsw_sp->router.lpm.trees[i];
+ for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
+ lpm_tree = &mlxsw_sp->router->lpm.trees[i];
lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
}
@@ -513,7 +620,7 @@ static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
- kfree(mlxsw_sp->router.lpm.trees);
+ kfree(mlxsw_sp->router->lpm.trees);
}
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
@@ -527,7 +634,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_is_used(vr))
return vr;
}
@@ -573,7 +680,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
tb_id = mlxsw_sp_fix_tb_id(tb_id);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
return vr;
}
@@ -680,13 +787,13 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
return -EIO;
max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
- mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
- GFP_KERNEL);
- if (!mlxsw_sp->router.vrs)
+ mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->vrs)
return -ENOMEM;
for (i = 0; i < max_vrs; i++) {
- vr = &mlxsw_sp->router.vrs[i];
+ vr = &mlxsw_sp->router->vrs[i];
vr->id = i;
}
@@ -706,7 +813,7 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
*/
mlxsw_core_flush_owq();
mlxsw_sp_router_fib_flush(mlxsw_sp);
- kfree(mlxsw_sp->router.vrs);
+ kfree(mlxsw_sp->router->vrs);
}
struct mlxsw_sp_neigh_key {
@@ -758,7 +865,7 @@ static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
&neigh_entry->ht_node,
mlxsw_sp_neigh_ht_params);
}
@@ -767,7 +874,7 @@ static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry)
{
- rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
+ rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
&neigh_entry->ht_node,
mlxsw_sp_neigh_ht_params);
}
@@ -815,7 +922,7 @@ mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
struct mlxsw_sp_neigh_key key;
key.n = n;
- return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
&key, mlxsw_sp_neigh_ht_params);
}
@@ -824,7 +931,7 @@ mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
- mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
+ mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
@@ -839,13 +946,13 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
- if (!mlxsw_sp->rifs[rif]) {
+ if (!mlxsw_sp->router->rifs[rif]) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
return;
}
dipn = htonl(dip);
- dev = mlxsw_sp->rifs[rif]->dev;
+ dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&arp_tbl, &dipn, dev);
if (!n) {
netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
@@ -954,7 +1061,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
/* Take RTNL mutex here to prevent lists from changes */
rtnl_lock();
- list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
nexthop_neighs_list_node)
/* If this neigh has nexthops, make the kernel think this neigh
* is active regardless of the traffic.
@@ -966,33 +1073,35 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- unsigned long interval = mlxsw_sp->router.neighs_update.interval;
+ unsigned long interval = mlxsw_sp->router->neighs_update.interval;
- mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
msecs_to_jiffies(interval));
}
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
- struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
- router.neighs_update.dw.work);
+ struct mlxsw_sp_router *router;
int err;
- err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
+ router = container_of(work, struct mlxsw_sp_router,
+ neighs_update.dw.work);
+ err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
+ dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
- mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
+ mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
- mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
+ mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
- router.nexthop_probe_dw.work);
+ struct mlxsw_sp_router *router;
+ router = container_of(work, struct mlxsw_sp_router,
+ nexthop_probe_dw.work);
/* Iterate over nexthop neighbours, find those that are unresolved and
* send ARP for them. This solves the chicken-and-egg problem where
* the nexthop wouldn't get offloaded until the neighbour is resolved
@@ -1002,13 +1111,13 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
* Take RTNL mutex here to prevent lists from changes.
*/
rtnl_lock();
- list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
nexthop_neighs_list_node)
if (!neigh_entry->connected)
neigh_event_send(neigh_entry->key.n, NULL);
rtnl_unlock();
- mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
+ mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
@@ -1130,7 +1239,7 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
- mlxsw_sp->router.neighs_update.interval = interval;
+ mlxsw_sp->router->neighs_update.interval = interval;
mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
@@ -1171,7 +1280,7 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
+ err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
&mlxsw_sp_neigh_ht_params);
if (err)
return err;
@@ -1182,20 +1291,20 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
/* Create the delayed works for the activity_update */
- INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
+ INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
mlxsw_sp_router_neighs_update_work);
- INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
+ INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
mlxsw_sp_router_probe_unresolved_nexthops);
- mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
- mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
return 0;
}
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
- cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
- cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
- rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
+ cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
+ rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
@@ -1270,7 +1379,7 @@ static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
&nh_grp->ht_node,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1278,7 +1387,7 @@ static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
+ rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
&nh_grp->ht_node,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1287,7 +1396,7 @@ static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_key key)
{
- return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
mlxsw_sp_nexthop_group_ht_params);
}
@@ -1300,14 +1409,14 @@ static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
+ return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
&nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
+ rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
mlxsw_sp_nexthop_ht_params);
}
@@ -1315,7 +1424,7 @@ static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_key key)
{
- return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
mlxsw_sp_nexthop_ht_params);
}
@@ -1602,7 +1711,7 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
*/
if (list_empty(&neigh_entry->nexthop_list))
list_add_tail(&neigh_entry->nexthop_neighs_list_node,
- &mlxsw_sp->router.nexthop_neighs_list);
+ &mlxsw_sp->router->nexthop_neighs_list);
nh->neigh_entry = neigh_entry;
list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
@@ -1700,7 +1809,7 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh;
struct mlxsw_sp_rif *rif;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
key.fib_nh = fib_nh;
@@ -2513,7 +2622,7 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node;
int err;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return 0;
fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
@@ -2553,7 +2662,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_node *fib_node;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
@@ -2584,7 +2693,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
return err;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
char raltb_pl[MLXSW_REG_RALTB_LEN];
char ralue_pl[MLXSW_REG_RALUE_LEN];
@@ -2666,7 +2775,7 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
+ struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_is_used(vr))
continue;
@@ -2678,11 +2787,11 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
int err;
- if (mlxsw_sp->router.aborted)
+ if (mlxsw_sp->router->aborted)
return;
dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
mlxsw_sp_router_fib_flush(mlxsw_sp);
- mlxsw_sp->router.aborted = true;
+ mlxsw_sp->router->aborted = true;
err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
@@ -2748,9 +2857,9 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
+ struct mlxsw_sp_router *router;
if (!net_eq(info->net, &init_net))
return NOTIFY_DONE;
@@ -2760,7 +2869,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return NOTIFY_BAD;
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
- fib_work->mlxsw_sp = mlxsw_sp;
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+ fib_work->mlxsw_sp = router->mlxsw_sp;
fib_work->event = event;
switch (event) {
@@ -2798,8 +2908,9 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
- return mlxsw_sp->rifs[i];
+ if (mlxsw_sp->router->rifs[i] &&
+ mlxsw_sp->router->rifs[i]->dev == dev)
+ return mlxsw_sp->router->rifs[i];
return NULL;
}
@@ -2849,77 +2960,44 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
return false;
}
+static enum mlxsw_sp_rif_type
+mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ enum mlxsw_sp_fid_type type;
+
+ /* RIF type is derived from the type of the underlying FID */
+ if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
+ type = MLXSW_SP_FID_TYPE_8021Q;
+ else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
+ type = MLXSW_SP_FID_TYPE_8021Q;
+ else if (netif_is_bridge_master(dev))
+ type = MLXSW_SP_FID_TYPE_8021D;
+ else
+ type = MLXSW_SP_FID_TYPE_RFID;
+
+ return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
+}
+
#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- if (!mlxsw_sp->rifs[i])
+ if (!mlxsw_sp->router->rifs[i])
return i;
return MLXSW_SP_INVALID_INDEX_RIF;
}
-static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
- bool *p_lagged, u16 *p_system_port)
-{
- u8 local_port = mlxsw_sp_vport->local_port;
-
- *p_lagged = mlxsw_sp_vport->lagged;
- *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
-}
-
-static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
- u16 vr_id, struct net_device *l3_dev,
- u16 rif_index, bool create)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- bool lagged = mlxsw_sp_vport->lagged;
- char ritr_pl[MLXSW_REG_RITR_LEN];
- u16 system_port;
-
- mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
- vr_id, l3_dev->mtu, l3_dev->dev_addr);
-
- mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
- mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
- mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
-
-static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
-{
- return MLXSW_SP_RFID_BASE + rif_index;
-}
-
-static struct mlxsw_sp_fid *
-mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
-{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
-
- f->leave = mlxsw_sp_vport_rif_sp_leave;
- f->ref_count = 0;
- f->dev = l3_dev;
- f->fid = fid;
-
- return f;
-}
-
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
+static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
+ u16 vr_id,
+ struct net_device *l3_dev)
{
struct mlxsw_sp_rif *rif;
- rif = kzalloc(sizeof(*rif), GFP_KERNEL);
+ rif = kzalloc(rif_size, GFP_KERNEL);
if (!rif)
return NULL;
@@ -2930,11 +3008,16 @@ mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
rif->vr_id = vr_id;
rif->dev = l3_dev;
rif->rif_index = rif_index;
- rif->f = f;
return rif;
}
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index)
+{
+ return mlxsw_sp->router->rifs[rif_index];
+}
+
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
return rif->rif_index;
@@ -2946,152 +3029,201 @@ int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
}
static struct mlxsw_sp_rif *
-mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
+mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif_params *params)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- u32 tb_id = l3mdev_fib_table(l3_dev);
- struct mlxsw_sp_vr *vr;
- struct mlxsw_sp_fid *f;
+ u32 tb_id = l3mdev_fib_table(params->dev);
+ const struct mlxsw_sp_rif_ops *ops;
+ enum mlxsw_sp_rif_type type;
struct mlxsw_sp_rif *rif;
- u16 fid, rif_index;
+ struct mlxsw_sp_fid *fid;
+ struct mlxsw_sp_vr *vr;
+ u16 rif_index;
int err;
- rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
- return ERR_PTR(-ERANGE);
+ type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
+ ops = mlxsw_sp->router->rif_ops_arr[type];
vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
if (IS_ERR(vr))
return ERR_CAST(vr);
- err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
- rif_index, true);
- if (err)
- goto err_vport_rif_sp_op;
-
- fid = mlxsw_sp_rif_sp_to_fid(rif_index);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- f = mlxsw_sp_rfid_alloc(fid, l3_dev);
- if (!f) {
- err = -ENOMEM;
- goto err_rfid_alloc;
+ rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif_index == MLXSW_SP_INVALID_INDEX_RIF) {
+ err = -ERANGE;
+ goto err_avail_rif_get;
}
- rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
+ rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
if (!rif) {
err = -ENOMEM;
goto err_rif_alloc;
}
+ rif->mlxsw_sp = mlxsw_sp;
+ rif->ops = ops;
- if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
- MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
- err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
- MLXSW_SP_RIF_COUNTER_EGRESS);
- if (err)
- netdev_dbg(mlxsw_sp_vport->dev,
- "Counter alloc Failed err=%d\n", err);
+ fid = ops->fid_get(rif);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ goto err_fid_get;
}
+ rif->fid = fid;
+
+ if (ops->setup)
+ ops->setup(rif, params);
- f->rif = rif;
- mlxsw_sp->rifs[rif_index] = rif;
+ err = ops->configure(rif);
+ if (err)
+ goto err_configure;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
+ mlxsw_sp_fid_index(fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ mlxsw_sp_rif_counters_alloc(rif);
+ mlxsw_sp_fid_rif_set(fid, rif);
+ mlxsw_sp->router->rifs[rif_index] = rif;
vr->rif_count++;
return rif;
-err_rif_alloc:
- kfree(f);
-err_rfid_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
- false);
-err_vport_rif_sp_op:
+ ops->deconfigure(rif);
+err_configure:
+ mlxsw_sp_fid_put(fid);
+err_fid_get:
+ kfree(rif);
+err_rif_alloc:
+err_avail_rif_get:
mlxsw_sp_vr_put(vr);
return ERR_PTR(err);
}
-static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct mlxsw_sp_rif *rif)
+void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
- struct net_device *l3_dev = rif->dev;
- struct mlxsw_sp_fid *f = rif->f;
- u16 rif_index = rif->rif_index;
- u16 fid = f->fid;
+ const struct mlxsw_sp_rif_ops *ops = rif->ops;
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_fid *fid = rif->fid;
+ struct mlxsw_sp_vr *vr;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
-
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
- mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
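+ /* Tear down in the reverse order of mlxsw_sp_rif_create() */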
+ vr = &mlxsw_sp->router->vrs[rif->vr_id];
vr->rif_count--;
- mlxsw_sp->rifs[rif_index] = NULL;
- f->rif = NULL;
-
+ mlxsw_sp->router->rifs[rif->rif_index] = NULL;
+ mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_rif_counters_free(rif);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(fid), false);
+ ops->deconfigure(rif);
+ mlxsw_sp_fid_put(fid);
kfree(rif);
+ mlxsw_sp_vr_put(vr);
+}
- kfree(f);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+static void
+mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
- mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
- false);
- mlxsw_sp_vr_put(vr);
+ params->vid = mlxsw_sp_port_vlan->vid;
+ params->lag = mlxsw_sp_port->lagged;
+ if (params->lag)
+ params->lag_id = mlxsw_sp_port->lag_id;
+ else
+ params->system_port = mlxsw_sp_port->local_port;
}
-static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *l3_dev)
+static int
+mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_fid *fid;
+ int err;
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif) {
- rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
+
+ mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
if (IS_ERR(rif))
return PTR_ERR(rif);
}
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
- rif->f->ref_count++;
+ /* FID was already created, just take a reference */
+ fid = rif->ops->fid_get(rif);
+ err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
+ if (err)
+ goto err_fid_port_vid_map;
- netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (err)
+ goto err_port_vid_learning_set;
+
+ err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
+ BR_STATE_FORWARDING);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ mlxsw_sp_port_vlan->fid = fid;
return 0;
+
+err_port_vid_stp_set:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+err_fid_port_vid_map:
+ mlxsw_sp_fid_put(fid);
+ return err;
}
-static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+ if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
+ return;
- mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
- if (--f->ref_count == 0)
- mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
+ mlxsw_sp_port_vlan->fid = NULL;
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+ /* If router port holds the last reference on the rFID, then the
+ * associated Sub-port RIF will be destroyed.
+ */
+ mlxsw_sp_fid_put(fid);
}
-static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
- struct net_device *port_dev,
- unsigned long event, u16 vid)
+static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
- struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_vport))
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
switch (event) {
case NETDEV_UP:
- return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
+ l3_dev);
case NETDEV_DOWN:
- mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
}
@@ -3106,7 +3238,7 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
netif_is_ovs_port(port_dev))
return 0;
- return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+ return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
@@ -3119,8 +3251,9 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
if (mlxsw_sp_port_dev_check(port_dev)) {
- err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
- event, vid);
+ err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
+ port_dev,
+ event, vid);
if (err)
return err;
}
@@ -3138,189 +3271,24 @@ static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
-static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev)
-{
- u16 fid;
-
- if (is_vlan_dev(l3_dev))
- fid = vlan_dev_vlan_id(l3_dev);
- else if (mlxsw_sp->master_bridge.dev == l3_dev)
- fid = 1;
- else
- return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
-
- return mlxsw_sp_fid_find(mlxsw_sp, fid);
-}
-
-static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
-{
- return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
-}
-
-static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
- MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-}
-
-static u16 mlxsw_sp_flood_table_index_get(u16 fid)
-{
- return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
-}
-
-static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
- bool set)
-{
- u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
- enum mlxsw_flood_table_type table_type;
- char *sftr_pl;
- u16 index;
- int err;
-
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
-
- table_type = mlxsw_sp_flood_table_type_get(fid);
- index = mlxsw_sp_flood_table_index_get(fid);
- mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
- 1, router_port, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-
- kfree(sftr_pl);
- return err;
-}
-
-static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
-{
- if (mlxsw_sp_fid_is_vfid(fid))
- return MLXSW_REG_RITR_FID_IF;
- else
- return MLXSW_REG_RITR_VLAN_IF;
-}
-
-static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- struct net_device *l3_dev,
- u16 fid, u16 rif,
- bool create)
-{
- enum mlxsw_reg_ritr_if_type rif_type;
- char ritr_pl[MLXSW_REG_RITR_LEN];
-
- rif_type = mlxsw_sp_rif_type_get(fid);
- mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
- l3_dev->dev_addr);
- mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev,
- struct mlxsw_sp_fid *f)
-{
- u32 tb_id = l3mdev_fib_table(l3_dev);
- struct mlxsw_sp_rif *rif;
- struct mlxsw_sp_vr *vr;
- u16 rif_index;
- int err;
-
- rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
- return -ERANGE;
-
- vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
- if (IS_ERR(vr))
- return PTR_ERR(vr);
-
- err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
- if (err)
- goto err_port_flood_set;
-
- err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
- rif_index, true);
- if (err)
- goto err_rif_bridge_op;
-
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
- if (err)
- goto err_rif_fdb_op;
-
- rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
- if (!rif) {
- err = -ENOMEM;
- goto err_rif_alloc;
- }
-
- f->rif = rif;
- mlxsw_sp->rifs[rif_index] = rif;
- vr->rif_count++;
-
- netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
-
- return 0;
-
-err_rif_alloc:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-err_rif_fdb_op:
- mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
- false);
-err_rif_bridge_op:
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-err_port_flood_set:
- mlxsw_sp_vr_put(vr);
- return err;
-}
-
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif)
-{
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
- struct net_device *l3_dev = rif->dev;
- struct mlxsw_sp_fid *f = rif->f;
- u16 rif_index = rif->rif_index;
-
- mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
-
- vr->rif_count--;
- mlxsw_sp->rifs[rif_index] = NULL;
- f->rif = NULL;
-
- kfree(rif);
-
- mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
-
- mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
- false);
-
- mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
-
- mlxsw_sp_vr_put(vr);
-
- netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
-}
-
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
- struct net_device *br_dev,
unsigned long event)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
- struct mlxsw_sp_fid *f;
-
- /* FID can either be an actual FID if the L3 device is the
- * VLAN-aware bridge or a VLAN device on top. Otherwise, the
- * L3 device is a VLAN-unaware bridge and we get a vFID.
- */
- f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
- if (WARN_ON(!f))
- return -EINVAL;
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
+ struct mlxsw_sp_rif *rif;
switch (event) {
case NETDEV_UP:
- return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
+ break;
case NETDEV_DOWN:
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ mlxsw_sp_rif_destroy(rif);
break;
}
@@ -3331,19 +3299,16 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
unsigned long event)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
- return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
- vid);
+ return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
+ event, vid);
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid);
- else if (netif_is_bridge_master(real_dev) &&
- mlxsw_sp->master_bridge.dev == real_dev)
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
- event);
+ else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
return 0;
}
@@ -3356,7 +3321,7 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
else if (netif_is_lag_master(dev))
return mlxsw_sp_inetaddr_lag_event(dev, event);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ return mlxsw_sp_inetaddr_bridge_event(dev, event);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(dev, event);
else
@@ -3406,6 +3371,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
+ u16 fid_index;
int err;
mlxsw_sp = mlxsw_sp_lower_get(dev);
@@ -3415,8 +3381,9 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
return 0;
+ fid_index = mlxsw_sp_fid_index(rif->fid);
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
if (err)
return err;
@@ -3425,7 +3392,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
if (err)
goto err_rif_edit;
- err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
if (err)
goto err_rif_fdb_op;
@@ -3439,7 +3406,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
err_rif_fdb_op:
mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
- mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
return err;
}
@@ -3492,16 +3459,225 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+static struct mlxsw_sp_rif_subport *
+mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
+{
+ return container_of(rif, struct mlxsw_sp_rif_subport, common);
+}
+
+static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ rif_subport->vid = params->vid;
+ rif_subport->lag = params->lag;
+ if (params->lag)
+ rif_subport->lag_id = params->lag_id;
+ else
+ rif_subport->system_port = params->system_port;
+}
+
+static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_subport *rif_subport;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu,
+ rif->dev->dev_addr);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
+ rif_subport->lag ? rif_subport->lag_id :
+ rif_subport->system_port,
+ rif_subport->vid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_rif_subport_op(rif, true);
+}
+
+static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ mlxsw_sp_rif_subport_op(rif, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
+ .type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .rif_size = sizeof(struct mlxsw_sp_rif_subport),
+ .setup = mlxsw_sp_rif_subport_setup,
+ .configure = mlxsw_sp_rif_subport_configure,
+ .deconfigure = mlxsw_sp_rif_subport_deconfigure,
+ .fid_get = mlxsw_sp_rif_subport_fid_get,
+};
+
+static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
+ enum mlxsw_reg_ritr_if_type type,
+ u16 vid_fid, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ int err;
+
+ err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ return 0;
+
+err_fid_bc_flood_set:
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
+
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+};
+
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+ int err;
+
+ err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
+ true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ return 0;
+
+err_fid_bc_flood_set:
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ return err;
+}
+
+static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
+ .type = MLXSW_SP_RIF_TYPE_FID,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_fid_fid_get,
+};
+
+static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
+ [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
+ [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
+};
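The three RIF flavours above all funnel through one mlxsw_sp_rif_ops vtable, selected by RIF type from the ops array. A minimal standalone sketch of the same dispatch pattern follows; the names (rif_type, subport_ops, ops_arr) are illustrative stand-ins, not driver symbols:

	#include <stdio.h>

	/* Illustrative analogue of the rif_ops dispatch above; not driver code. */
	enum rif_type { RIF_SUBPORT, RIF_VLAN, RIF_FID, RIF_TYPE_MAX };

	struct rif;	/* opaque here; the driver allocates ops->rif_size bytes */
	struct rif_ops {
		enum rif_type type;
		int (*configure)(struct rif *rif);
		void (*deconfigure)(struct rif *rif);
	};

	static int subport_configure(struct rif *rif)
	{
		(void)rif;
		puts("subport configured");
		return 0;
	}

	static void subport_deconfigure(struct rif *rif)
	{
		(void)rif;
		puts("subport deconfigured");
	}

	static const struct rif_ops subport_ops = {
		.type = RIF_SUBPORT,
		.configure = subport_configure,
		.deconfigure = subport_deconfigure,
	};

	/* Type-indexed array, mirroring mlxsw_sp_rif_ops_arr[] above. */
	static const struct rif_ops *ops_arr[RIF_TYPE_MAX] = {
		[RIF_SUBPORT] = &subport_ops,
		/* RIF_VLAN / RIF_FID would plug in here the same way */
	};

	int main(void)
	{
		const struct rif_ops *ops = ops_arr[RIF_SUBPORT];
		struct rif *rif = NULL;	/* stand-in for an allocated RIF */

		if (!ops->configure(rif))
			ops->deconfigure(rif);
		return 0;
	}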
+
+static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+
+ mlxsw_sp->router->rifs = kcalloc(max_rifs,
+ sizeof(struct mlxsw_sp_rif *),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router->rifs)
+ return -ENOMEM;
+
+ mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
+
+ return 0;
+}
+
+static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
+
+ kfree(mlxsw_sp->router->rifs);
+}
+
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
- struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+ struct mlxsw_sp_router *router;
/* Flush pending FIB notifications and then flush the device's
* table before requesting another dump. The FIB notification
* block is unregistered, so no need to take RTNL.
*/
mlxsw_core_flush_owq();
- mlxsw_sp_router_fib_flush(mlxsw_sp);
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+ mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -3512,55 +3688,50 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
-
max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
- mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
- GFP_KERNEL);
- if (!mlxsw_sp->rifs)
- return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
- goto err_rgcr_fail;
-
+ return err;
return 0;
-
-err_rgcr_fail:
- kfree(mlxsw_sp->rifs);
- return err;
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
- int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
-
- kfree(mlxsw_sp->rifs);
}
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_router *router;
int err;
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
+ if (!router)
+ return -ENOMEM;
+ mlxsw_sp->router = router;
+ router->mlxsw_sp = mlxsw_sp;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
- return err;
+ goto err_router_init;
+
+ err = mlxsw_sp_rifs_init(mlxsw_sp);
+ if (err)
+ goto err_rifs_init;
- err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
+ err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
&mlxsw_sp_nexthop_ht_params);
if (err)
goto err_nexthop_ht_init;
- err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
+ err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
&mlxsw_sp_nexthop_group_ht_params);
if (err)
goto err_nexthop_group_ht_init;
@@ -3577,8 +3748,8 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_neigh_init;
- mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
- err = register_fib_notifier(&mlxsw_sp->fib_nb,
+ mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
+ err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
if (err)
goto err_register_fib_notifier;
@@ -3592,21 +3763,27 @@ err_neigh_init:
err_vrs_init:
mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
- rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
- rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
+ mlxsw_sp_rifs_fini(mlxsw_sp);
+err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
+err_router_init:
+ kfree(mlxsw_sp->router);
return err;
}
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
- unregister_fib_notifier(&mlxsw_sp->fib_nb);
+ unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
- rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
- rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
+ rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
+ mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ kfree(mlxsw_sp->router);
}
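The reworked mlxsw_sp_router_init() above follows the kernel's usual goto-ladder unwind: each failure label tears down exactly the steps that already succeeded, in reverse order, and mlxsw_sp_router_fini() repeats the same teardown sequence. A compact standalone illustration of the idiom, with made-up step names rather than the driver's:

	#include <stdio.h>
	#include <stdlib.h>

	/* Standalone sketch of the goto-based unwind; step_a/step_b are
	 * placeholders, not driver functions. */
	static int step_a(void) { puts("a up"); return 0; }
	static void undo_a(void) { puts("a down"); }
	static int step_b(void) { puts("b up"); return -1; /* simulated failure */ }

	static int init(void)
	{
		int err;

		err = step_a();
		if (err)
			goto err_step_a;

		err = step_b();
		if (err)
			goto err_step_b;

		return 0;

	err_step_b:
		undo_a();	/* unwind in reverse order of setup */
	err_step_a:
		return err;
	}

	int main(void)
	{
		return init() ? EXIT_FAILURE : EXIT_SUCCESS;
	}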
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index c3095fef6697..a3e8d2b25148 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -42,6 +42,8 @@ enum mlxsw_sp_rif_counter_dir {
MLXSW_SP_RIF_COUNTER_EGRESS,
};
+struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
+ u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f4bb0c0b7c1d..edcc273d7597 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -52,340 +52,596 @@
#include "core.h"
#include "reg.h"
-static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
-{
- struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
- u16 fid = vid;
+struct mlxsw_sp_bridge_ops;
+
+struct mlxsw_sp_bridge {
+ struct mlxsw_sp *mlxsw_sp;
+ struct {
+ struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ unsigned int interval; /* ms */
+ } fdb_notify;
+#define MLXSW_SP_MIN_AGEING_TIME 10
+#define MLXSW_SP_MAX_AGEING_TIME 1000000
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ u32 ageing_time;
+ bool vlan_enabled_exists;
+ struct list_head bridges_list;
+ struct list_head mids_list;
+ DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
+ const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
+ const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
+};
+
+struct mlxsw_sp_bridge_device {
+ struct net_device *dev;
+ struct list_head list;
+ struct list_head ports_list;
+ u8 vlan_enabled:1,
+ multicast_enabled:1;
+ const struct mlxsw_sp_bridge_ops *ops;
+};
+
+struct mlxsw_sp_bridge_port {
+ struct net_device *dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct list_head list;
+ struct list_head vlans_list;
+ unsigned int ref_count;
+ u8 stp_state;
+ unsigned long flags;
+ bool mrouter;
+ bool lagged;
+ union {
+ u16 lag_id;
+ u16 system_port;
+ };
+};
- fid = f ? f->fid : fid;
+struct mlxsw_sp_bridge_vlan {
+ struct list_head list;
+ struct list_head port_vlan_list;
+ u16 vid;
+ u8 egress_untagged:1,
+ pvid:1;
+};
+
+struct mlxsw_sp_bridge_ops {
+ int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+ void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+ struct mlxsw_sp_fid *
+ (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid);
+};
+
+static int
+mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
+
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
- if (!fid)
- fid = mlxsw_sp_port->pvid;
+ list_for_each_entry(bridge_device, &bridge->bridges_list, list)
+ if (bridge_device->dev == br_dev)
+ return bridge_device;
- return fid;
+ return NULL;
}
-static struct mlxsw_sp_port *
-mlxsw_sp_port_orig_get(struct net_device *dev,
- struct mlxsw_sp_port *mlxsw_sp_port)
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
{
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_fid *fid;
- u16 vid;
+ struct device *dev = bridge->mlxsw_sp->bus_info->dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ bool vlan_enabled = br_vlan_enabled(br_dev);
- if (netif_is_bridge_master(dev)) {
- fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
- dev);
- if (fid) {
- mlxsw_sp_vport =
- mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid->fid);
- WARN_ON(!mlxsw_sp_vport);
- return mlxsw_sp_vport;
- }
+ if (vlan_enabled && bridge->vlan_enabled_exists) {
+ dev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ return ERR_PTR(-EINVAL);
}
- if (!is_vlan_dev(dev))
- return mlxsw_sp_port;
+ bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
+ if (!bridge_device)
+ return ERR_PTR(-ENOMEM);
+
+ bridge_device->dev = br_dev;
+ bridge_device->vlan_enabled = vlan_enabled;
+ bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
+ INIT_LIST_HEAD(&bridge_device->ports_list);
+ if (vlan_enabled) {
+ bridge->vlan_enabled_exists = true;
+ bridge_device->ops = bridge->bridge_8021q_ops;
+ } else {
+ bridge_device->ops = bridge->bridge_8021d_ops;
+ }
+ list_add(&bridge_device->list, &bridge->bridges_list);
- vid = vlan_dev_vlan_id(dev);
- mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- WARN_ON(!mlxsw_sp_vport);
+ return bridge_device;
+}
- return mlxsw_sp_vport;
+static void
+mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ list_del(&bridge_device->list);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+ WARN_ON(!list_empty(&bridge_device->ports_list));
+ kfree(bridge_device);
}
-static int mlxsw_sp_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
+static struct mlxsw_sp_bridge_device *
+mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
+ bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
+ if (bridge_device)
+ return bridge_device;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
- memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
- attr->u.ppid.id_len);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- attr->u.brport_flags =
- (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
- (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
- (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
- break;
- default:
- return -EOPNOTSUPP;
+ return mlxsw_sp_bridge_device_create(bridge, br_dev);
+}
+
+static void
+mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ if (list_empty(&bridge_device->ports_list))
+ mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
+}
+
+static struct mlxsw_sp_bridge_port *
+__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *brport_dev)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ if (bridge_port->dev == brport_dev)
+ return bridge_port;
}
- return 0;
+ return NULL;
}
-static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 state)
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- enum mlxsw_reg_spms_state spms_state;
- char *spms_pl;
- u16 vid;
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
+ if (!bridge_device)
+ return NULL;
+
+ return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
+ struct net_device *brport_dev)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
+ if (!bridge_port)
+ return NULL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
+ bridge_port->lagged = mlxsw_sp_port->lagged;
+ if (bridge_port->lagged)
+ bridge_port->lag_id = mlxsw_sp_port->lag_id;
+ else
+ bridge_port->system_port = mlxsw_sp_port->local_port;
+ bridge_port->dev = brport_dev;
+ bridge_port->bridge_device = bridge_device;
+ bridge_port->stp_state = BR_STATE_DISABLED;
+ bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC;
+ INIT_LIST_HEAD(&bridge_port->vlans_list);
+ list_add(&bridge_port->list, &bridge_device->ports_list);
+ bridge_port->ref_count = 1;
+
+ return bridge_port;
+}
+
+static void
+mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
+{
+ list_del(&bridge_port->list);
+ WARN_ON(!list_empty(&bridge_port->vlans_list));
+ kfree(bridge_port);
+}
+
+static bool
+mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
+ bridge_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
+
+ /* In case ports were pulled out of a bridged LAG, the
+ * reference count can still be non-zero, yet the bridge port
+ * should be destroyed, as it's no longer an upper of ours.
+ */
+ if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
+ return true;
+ else if (bridge_port->ref_count == 0)
+ return true;
+ else
+ return false;
+}
+
+static struct mlxsw_sp_bridge_port *
+mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
int err;
- switch (state) {
- case BR_STATE_FORWARDING:
- spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
- break;
- case BR_STATE_LEARNING:
- spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
- break;
- case BR_STATE_LISTENING: /* fall-through */
- case BR_STATE_DISABLED: /* fall-through */
- case BR_STATE_BLOCKING:
- spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
- break;
- default:
- BUG();
+ bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
+ if (bridge_port) {
+ bridge_port->ref_count++;
+ return bridge_port;
}
- spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
- if (!spms_pl)
- return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
+ if (IS_ERR(bridge_device))
+ return ERR_CAST(bridge_device);
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
- } else {
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+ bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
+ if (!bridge_port) {
+ err = -ENOMEM;
+ goto err_bridge_port_create;
}
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
- kfree(spms_pl);
- return err;
+ return bridge_port;
+
+err_bridge_port_create:
+ mlxsw_sp_bridge_device_put(bridge, bridge_device);
+ return ERR_PTR(err);
}
-static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
- u8 state)
+static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
+ struct mlxsw_sp_bridge_port *bridge_port)
{
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ struct mlxsw_sp_bridge_device *bridge_device;
- mlxsw_sp_port->stp_state = state;
- return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
+ bridge_port->ref_count--;
+ if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
+ return;
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_bridge_port_destroy(bridge_port);
+ mlxsw_sp_bridge_device_put(bridge, bridge_device);
}
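Bridge devices and bridge ports are managed with a matching get/put scheme: _get() returns an existing object with its reference count bumped or creates a fresh one, and _put() drops the count and destroys the object once nothing references it. The skeleton of that pattern, stripped of driver specifics (obj/obj_get/obj_put are invented names, and a single global stands in for the list lookup):

	#include <stdlib.h>
	#include <string.h>

	/* Illustrative get/put refcount skeleton; not the driver's types. */
	struct obj {
		unsigned int ref_count;
		char name[16];
	};

	static struct obj *the_obj;	/* stand-in for a list lookup */

	static struct obj *obj_get(const char *name)
	{
		if (the_obj) {
			the_obj->ref_count++;
			return the_obj;
		}
		the_obj = calloc(1, sizeof(*the_obj));
		if (!the_obj)
			return NULL;
		strncpy(the_obj->name, name, sizeof(the_obj->name) - 1);
		the_obj->ref_count = 1;
		return the_obj;
	}

	static void obj_put(struct obj *obj)
	{
		if (--obj->ref_count == 0) {
			free(obj);
			the_obj = NULL;
		}
	}

	int main(void)
	{
		struct obj *o = obj_get("br0");

		obj_get("br0");	/* second user of the same object */
		obj_put(o);	/* still alive, one reference left */
		obj_put(o);	/* last reference: freed */
		return 0;
	}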
-static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end,
- enum mlxsw_sp_flood_table table,
- bool set)
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_bridge_device *
+ bridge_device,
+ u16 vid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u16 local_port = mlxsw_sp_port->local_port;
- enum mlxsw_flood_table_type table_type;
- u16 range = idx_end - idx_begin + 1;
- char *sftr_pl;
- int err;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- else
- table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (!mlxsw_sp_port_vlan->bridge_port)
+ continue;
+ if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
+ bridge_device)
+ continue;
+ if (bridge_device->vlan_enabled &&
+ mlxsw_sp_port_vlan->vid != vid)
+ continue;
+ return mlxsw_sp_port_vlan;
+ }
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
- return -ENOMEM;
+ return NULL;
+}
- mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
- table_type, range, local_port, set);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+static struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_index)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- kfree(sftr_pl);
- return err;
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+
+ if (fid && mlxsw_sp_fid_index(fid) == fid_index)
+ return mlxsw_sp_port_vlan;
+ }
+
+ return NULL;
}
-static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end, bool uc_set,
- bool bc_set, bool mc_set)
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
+ u16 vid)
{
- int err;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_UC, uc_set);
- if (err)
- return err;
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ if (bridge_vlan->vid == vid)
+ return bridge_vlan;
+ }
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_BC, bc_set);
- if (err)
- goto err_flood_bm_set;
+ return NULL;
+}
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_MC, mc_set);
- if (err)
- goto err_flood_mc_set;
- return 0;
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
-err_flood_mc_set:
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
-err_flood_bm_set:
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
- MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
- return err;
+ bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
+ if (!bridge_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
+ bridge_vlan->vid = vid;
+ list_add(&bridge_vlan->list, &bridge_port->vlans_list);
+
+ return bridge_vlan;
}
-static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
- enum mlxsw_sp_flood_table table,
- bool set)
+static void
+mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
- struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, last_visited_vid;
- int err;
+ list_del(&bridge_vlan->list);
+ WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
+ kfree(bridge_vlan);
+}
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
+static struct mlxsw_sp_bridge_vlan *
+mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
- return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
- vfid, table, set);
- }
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ if (bridge_vlan)
+ return bridge_vlan;
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
- table, set);
- if (err) {
- last_visited_vid = vid;
- goto err_port_flood_set;
- }
+ return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
+}
+
+static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
+{
+ if (list_empty(&bridge_vlan->port_vlan_list))
+ mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
+}
+
+static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *dev,
+ unsigned long *brport_flags)
+{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
+ if (WARN_ON(!bridge_port))
+ return;
+
+ memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
+}
+
+static int mlxsw_sp_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
+ memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
+ attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
+ &attr->u.brport_flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
return 0;
+}
-err_port_flood_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
- __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
- !set);
- netdev_err(dev, "Failed to configure unicast flooding\n");
- return err;
+static int
+mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ u8 state)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
+ bridge_vlan->vid, state);
+ }
+
+ return 0;
}
-static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
- bool mc_disabled)
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ u8 state)
{
- int set;
- int err = 0;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
if (switchdev_trans_ph_prepare(trans))
return 0;
- if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
- set = mc_disabled ?
- mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
- err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_MC,
- set);
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (!bridge_port)
+ return 0;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
+ bridge_vlan, state);
+ if (err)
+ goto err_port_bridge_vlan_stp_set;
}
- if (!err)
- mlxsw_sp_port->mc_disabled = mc_disabled;
+ bridge_port->stp_state = state;
+ return 0;
+
+err_port_bridge_vlan_stp_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
+ bridge_port->stp_state);
return err;
}
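The error path above uses list_for_each_entry_continue_reverse() to roll back only the VLANs already touched: it resumes from the entry that failed and walks backwards toward the list head, restoring the previous STP state. A userspace analogue of the same apply-then-rollback-in-reverse shape, over an array with indices instead of list cursors (vids and states are made up):

	#include <stdio.h>

	/* Simulate one entry failing so the rollback path runs. */
	static int apply(int vid, int state)
	{
		if (vid == 30 && state == 1)
			return -1;
		printf("vid %d -> state %d\n", vid, state);
		return 0;
	}

	static int set_all(const int *vids, int n, int new_state, int old_state)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = apply(vids[i], new_state);
			if (err)
				goto rollback;
		}
		return 0;

	rollback:
		/* Undo only entries [0, i): walk back from the failure point. */
		while (--i >= 0)
			apply(vids[i], old_state);
		return err;
	}

	int main(void)
	{
		int vids[] = { 10, 20, 30, 40 };

		return set_all(vids, 4, 1, 0) ? 1 : 0;
	}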
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
- bool set)
+static int
+mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
{
- bool mc_set = set;
- u16 vfid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- /* In case of vFIDs, index into the flooding table is relative to
- * the start of the vFIDs range.
- */
- vfid = mlxsw_sp_fid_to_vfid(fid);
-
- if (set)
- mc_set = mlxsw_sp_vport->mc_disabled ?
- mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type,
+ mlxsw_sp_port->local_port,
+ member);
+ }
- return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
- mc_set);
+ return 0;
}
-static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool set)
+static int
+mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
{
- u16 vid;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
+ bridge_vlan,
+ packet_type,
+ member);
+ if (err)
+ goto err_port_bridge_vlan_flood_set;
+ }
- return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- set);
+ return 0;
+
+err_port_bridge_vlan_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
+ packet_type, !member);
+ return err;
+}
+
+static int
+mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ bool set)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid = bridge_vlan->vid;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
+ continue;
+ return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
}
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
- err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
- set);
+ return 0;
+}
+
+static int
+mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool set)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
+ bridge_vlan, set);
if (err)
- goto err_port_vid_learning_set;
+ goto err_port_bridge_vlan_learning_set;
}
return 0;
-err_port_vid_learning_set:
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
+err_port_bridge_vlan_learning_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
+ bridge_vlan, !set);
return err;
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
+ struct net_device *orig_dev,
unsigned long brport_flags)
{
- unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
- unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+ struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
-
if (switchdev_trans_ph_prepare(trans))
return 0;
- if ((uc_flood ^ brport_flags) & BR_FLOOD) {
- err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_UC,
- !mlxsw_sp_port->uc_flood);
- if (err)
- return err;
- }
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
- if ((learning ^ brport_flags) & BR_LEARNING) {
- err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
- !mlxsw_sp_port->learning);
- if (err)
- goto err_port_learning_set;
- }
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_UC,
+ brport_flags & BR_FLOOD);
+ if (err)
+ return err;
- mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
- mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
- mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+ err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
+ brport_flags & BR_LEARNING);
+ if (err)
+ return err;
- return 0;
+ memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
-err_port_learning_set:
- if ((uc_flood ^ brport_flags) & BR_FLOOD)
- mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_UC,
- mlxsw_sp_port->uc_flood);
- return err;
+ return 0;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
@@ -397,7 +653,7 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
if (err)
return err;
- mlxsw_sp->ageing_time = ageing_time;
+ mlxsw_sp->bridge->ageing_time = ageing_time;
return 0;
}
@@ -426,28 +682,77 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool vlan_enabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
- /* SWITCHDEV_TRANS_PREPARE phase */
- if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
- netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_device))
return -EINVAL;
- }
- return 0;
+ if (bridge_device->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
+ return -EINVAL;
}
static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
+ struct net_device *orig_dev,
bool is_port_mc_router)
{
+ struct mlxsw_sp_bridge_port *bridge_port;
+
if (switchdev_trans_ph_prepare(trans))
return 0;
- mlxsw_sp_port->mc_router = is_port_mc_router;
- if (!mlxsw_sp_port->mc_disabled)
- return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
- MLXSW_SP_FLOOD_TABLE_MC,
- is_port_mc_router);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
+ orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ if (!bridge_port->bridge_device->multicast_enabled)
+ return 0;
+
+ return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ is_port_mc_router);
+}
+
+static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_device)
+ return 0;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
+ bool member = mc_disabled ? true : bridge_port->mrouter;
+
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ packet_type, member);
+ if (err)
+ return err;
+ }
+
+ bridge_device->multicast_enabled = !mc_disabled;
return 0;
}
@@ -457,19 +762,17 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
struct switchdev_trans *trans)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- int err = 0;
-
- mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
+ int err;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
@@ -483,10 +786,12 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
attr->u.mc_disabled);
break;
default:
@@ -497,268 +802,245 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err;
}
-static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ const struct mlxsw_sp_bridge_device *bridge_device;
- mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+ bridge_device = bridge_port->bridge_device;
+ return !bridge_device->multicast_enabled ? true : bridge_port->mrouter;
}
-static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- char svfa_pl[MLXSW_REG_SVFA_LEN];
-
- mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
-}
-
-static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
+static int
+mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct mlxsw_sp_bridge_port *bridge_port)
{
- struct mlxsw_sp_fid *f;
-
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ struct mlxsw_sp_fid *fid;
+ int err;
- f->fid = fid;
+ bridge_device = bridge_port->bridge_device;
+ fid = bridge_device->ops->fid_get(bridge_device, vid);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
- return f;
-}
-
-struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- struct mlxsw_sp_fid *f;
- int err;
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
+ bridge_port->flags & BR_FLOOD);
+ if (err)
+ goto err_fid_uc_flood_set;
- err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
+ mlxsw_sp_mc_flood(bridge_port));
if (err)
- return ERR_PTR(err);
+ goto err_fid_mc_flood_set;
- /* Although all the ports member in the FID might be using a
- * {Port, VID} to FID mapping, we create a global VID-to-FID
- * mapping. This allows a port to transition to VLAN mode,
- * knowing the global mapping exists.
- */
- err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
+ err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
+ true);
if (err)
- goto err_fid_map;
+ goto err_fid_bc_flood_set;
- f = mlxsw_sp_fid_alloc(fid);
- if (!f) {
- err = -ENOMEM;
- goto err_allocate_fid;
- }
+ err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
+ if (err)
+ goto err_fid_port_vid_map;
- list_add(&f->list, &mlxsw_sp->fids);
+ mlxsw_sp_port_vlan->fid = fid;
- return f;
+ return 0;
-err_allocate_fid:
- mlxsw_sp_fid_map(mlxsw_sp, fid, false);
-err_fid_map:
- mlxsw_sp_fid_op(mlxsw_sp, fid, false);
- return ERR_PTR(err);
+err_fid_port_vid_map:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
+err_fid_mc_flood_set:
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
+err_fid_uc_flood_set:
+ mlxsw_sp_fid_put(fid);
+ return err;
}
-void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
+static void
+mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- u16 fid = f->fid;
-
- list_del(&f->list);
-
- if (f->rif)
- mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
-
- kfree(f);
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
- mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+ mlxsw_sp_port_vlan->fid = NULL;
+ mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
+ mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
+ mlxsw_sp_fid_put(fid);
+}
- mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+static u16
+mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool is_pvid)
+{
+ if (is_pvid)
+ return vid;
+ else if (mlxsw_sp_port->pvid == vid)
+ return 0; /* Dis-allow untagged packets */
+ else
+ return mlxsw_sp_port->pvid;
}
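mlxsw_sp_port_pvid_determine() above encodes a three-way rule: a VLAN flagged as PVID becomes the new PVID; removing the PVID flag from the current PVID disables untagged traffic (PVID 0); any other VLAN leaves the current PVID alone. A standalone restatement of that rule with the three cases checked (values here are arbitrary examples):

	#include <assert.h>

	/* Restates the PVID-selection rule above; not a driver function. */
	static unsigned short pvid_determine(unsigned short cur_pvid,
					     unsigned short vid, int is_pvid)
	{
		if (is_pvid)
			return vid;	/* this VLAN becomes the PVID */
		if (cur_pvid == vid)
			return 0;	/* was PVID, no longer: drop untagged */
		return cur_pvid;	/* unrelated VLAN: keep current PVID */
	}

	int main(void)
	{
		assert(pvid_determine(1, 10, 1) == 10);	/* flag set: adopt vid */
		assert(pvid_determine(10, 10, 0) == 0);	/* flag cleared on PVID */
		assert(pvid_determine(1, 10, 0) == 1);	/* other vid: unchanged */
		return 0;
	}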
-static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
+static int
+mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct mlxsw_sp_bridge_port *bridge_port)
{
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ int err;
- if (test_bit(fid, mlxsw_sp_port->active_vlans))
+ /* No need to continue if only VLAN flags were changed */
+ if (mlxsw_sp_port_vlan->bridge_port)
return 0;
- f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
- if (!f) {
- f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
- if (IS_ERR(f))
- return PTR_ERR(f);
+ err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
+ bridge_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_vid_learning_set;
+
+ err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
+ bridge_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
+ if (!bridge_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
}
- f->ref_count++;
+ list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
+ &bridge_vlan->port_vlan_list);
- netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
+ mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
+ bridge_port->dev);
+ mlxsw_sp_port_vlan->bridge_port = bridge_port;
return 0;
+
+err_bridge_vlan_get:
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
+err_port_vid_stp_set:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+err_port_vid_learning_set:
+ mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
+ return err;
}
-static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid)
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
- struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
+ struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 vid = mlxsw_sp_port_vlan->vid;
+ bool last;
- f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
- if (WARN_ON(!f))
+ if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
+ mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
- netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ last = list_is_singular(&bridge_vlan->port_vlan_list);
- mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
+ list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
+ mlxsw_sp_bridge_vlan_put(bridge_vlan);
+ mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (last)
+ mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
+ bridge_port,
+ mlxsw_sp_fid_index(fid));
+ mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
- if (--f->ref_count == 0)
- mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
+ mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
+ mlxsw_sp_port_vlan->bridge_port = NULL;
}
-static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
- bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
-
- /* If port doesn't have vPorts, then it can use the global
- * VID-to-FID mapping.
- */
- if (list_empty(&mlxsw_sp_port->vports_list))
- return 0;
-
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
-}
+static int
+mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 vid, bool is_untagged, bool is_pvid)
+{
+ u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ u16 old_pvid = mlxsw_sp_port->pvid;
+ int err;
-static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid_begin, u16 fid_end)
-{
- bool mc_flood;
- int fid, err;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
+ if (IS_ERR(mlxsw_sp_port_vlan))
+ return PTR_ERR(mlxsw_sp_port_vlan);
- for (fid = fid_begin; fid <= fid_end; fid++) {
- err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
- if (err)
- goto err_port_fid_join;
- }
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
+ is_untagged);
+ if (err)
+ goto err_port_vlan_set;
- mc_flood = mlxsw_sp_port->mc_disabled ?
- mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ if (err)
+ goto err_port_pvid_set;
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
- mlxsw_sp_port->uc_flood, true,
- mc_flood);
+ err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
if (err)
- goto err_port_flood_set;
+ goto err_port_vlan_bridge_join;
- for (fid = fid_begin; fid <= fid_end; fid++) {
- err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
- if (err)
- goto err_port_fid_map;
- }
+ bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
+ bridge_vlan->egress_untagged = is_untagged;
+ bridge_vlan->pvid = is_pvid;
return 0;
-err_port_fid_map:
- for (fid--; fid >= fid_begin; fid--)
- mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
- false, false);
-err_port_flood_set:
- fid = fid_end;
-err_port_fid_join:
- for (fid--; fid >= fid_begin; fid--)
- __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+err_port_vlan_bridge_join:
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
+err_port_pvid_set:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_vlan_set:
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
return err;
}
-static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 fid_begin, u16 fid_end)
-{
- int fid;
-
- for (fid = fid_begin; fid <= fid_end; fid++)
- mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
-
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
- false, false);
-
- for (fid = fid_begin; fid <= fid_end; fid++)
- __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
-}
-
-static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char spvid_pl[MLXSW_REG_SPVID_LEN];
-
- mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
-}
-
-static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
- bool allow)
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char spaft_pl[MLXSW_REG_SPAFT_LEN];
-
- mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
-}
-
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- if (!vid) {
- err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
- if (err) {
- netdev_err(dev, "Failed to disallow untagged traffic\n");
- return err;
- }
- } else {
- err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to set PVID\n");
- return err;
- }
-
- /* Only allow if not already allowed. */
- if (!mlxsw_sp_port->pvid) {
- err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
- true);
- if (err) {
- netdev_err(dev, "Failed to allow untagged traffic\n");
- goto err_port_allow_untagged_set;
- }
- }
- }
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 vid;
- mlxsw_sp_port->pvid = vid;
- return 0;
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
-err_port_allow_untagged_set:
- __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
- return err;
-}
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
-static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool learn_enable)
-{
- u16 vid, vid_e;
- int err;
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
- for (vid = vid_begin; vid <= vid_end;
- vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
- vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
- vid_end);
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
- err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
- vid_e, learn_enable);
+ err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
+ vid, flag_untagged,
+ flag_pvid);
if (err)
return err;
}
@@ -766,102 +1048,27 @@ static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end,
- bool flag_untagged, bool flag_pvid)
+static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
{
- struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, old_pvid;
- int err;
-
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
-
- err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
- if (err) {
- netdev_err(dev, "Failed to join FIDs\n");
- return err;
- }
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- true, flag_untagged);
- if (err) {
- netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
- vid_end);
- goto err_port_vlans_set;
- }
-
- old_pvid = mlxsw_sp_port->pvid;
- if (flag_pvid && old_pvid != vid_begin) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
- if (err) {
- netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
- goto err_port_pvid_set;
- }
- } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
- if (err) {
- netdev_err(dev, "Unable to del PVID\n");
- goto err_port_pvid_set;
- }
- }
-
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- mlxsw_sp_port->learning);
- if (err) {
- netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
- vid_begin, vid_end);
- goto err_port_vid_learning_set;
- }
-
- /* Changing activity bits only if HW operation succeded */
- for (vid = vid_begin; vid <= vid_end; vid++) {
- set_bit(vid, mlxsw_sp_port->active_vlans);
- if (flag_untagged)
- set_bit(vid, mlxsw_sp_port->untagged_vlans);
- else
- clear_bit(vid, mlxsw_sp_port->untagged_vlans);
- }
-
- /* STP state change must be done after we set active VLANs */
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
- mlxsw_sp_port->stp_state);
- if (err) {
- netdev_err(dev, "Failed to set STP state\n");
- goto err_port_stp_state_set;
- }
-
- return 0;
-
-err_port_stp_state_set:
- for (vid = vid_begin; vid <= vid_end; vid++)
- clear_bit(vid, mlxsw_sp_port->active_vlans);
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- false);
-err_port_vid_learning_set:
- if (old_pvid != mlxsw_sp_port->pvid)
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
-err_port_pvid_set:
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
-err_port_vlans_set:
- mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
- return err;
+ return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
+ MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
}
-static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int
+mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
{
- bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ bool lagged = bridge_port->lagged;
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+ u16 system_port;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
+ mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
+ mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
- return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
- vlan->vid_begin, vlan->vid_end,
- flag_untagged, flag_pvid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
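The flush helper picks the SFDF flush type from whether the bridge port sits behind a LAG, then reuses a single field to carry either the LAG ID or the system port. A condensed restatement of just that selection logic (the enum and struct here are invented for illustration; only the shape mirrors the code above):

	#include <stdio.h>

	enum flush_type { FLUSH_PER_PORT_AND_FID, FLUSH_PER_LAG_AND_FID };

	struct bridge_port {
		int lagged;
		unsigned short lag_id;
		unsigned short system_port;
	};

	static enum flush_type fdb_flush_type(int lagged)
	{
		return lagged ? FLUSH_PER_LAG_AND_FID : FLUSH_PER_PORT_AND_FID;
	}

	int main(void)
	{
		struct bridge_port bp = { .lagged = 1, .lag_id = 3 };
		unsigned short port = bp.lagged ? bp.lag_id : bp.system_port;

		printf("flush type %d, port/lag %u\n",
		       fdb_flush_type(bp.lagged), port);
		return 0;
	}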
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
@@ -939,24 +1146,39 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
- u16 lag_vid = 0;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = fdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 fid_index, vid;
if (switchdev_trans_ph_prepare(trans))
return 0;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- }
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ fdb->vid);
+ if (!mlxsw_sp_port_vlan)
+ return 0;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+ vid = mlxsw_sp_port_vlan->vid;
if (!mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
+ return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
mlxsw_sp_port->local_port,
- fdb->addr, fid, true, false);
+ fdb->addr, fid_index, true,
+ false);
else
- return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
+ return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
mlxsw_sp_port->lag_id,
- fdb->addr, fid, lag_vid,
+ fdb->addr, fid_index, vid,
true, false);
}
@@ -1006,7 +1228,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_mid *mid;
- list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
+ list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) {
if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
return mid;
}
@@ -1020,7 +1242,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mid *mid;
u16 mid_idx;
- mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
+ mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
MLXSW_SP_MID_MAX);
if (mid_idx == MLXSW_SP_MID_MAX)
return NULL;
@@ -1029,12 +1251,12 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
if (!mid)
return NULL;
- set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
+ set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
ether_addr_copy(mid->addr, addr);
mid->fid = fid;
mid->mid = mid_idx;
mid->ref_count = 0;
- list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
+ list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list);
return mid;
}
@@ -1044,7 +1266,7 @@ static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
{
if (--mid->ref_count == 0) {
list_del(&mid->list);
- clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
+ clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
kfree(mid);
return 1;
}
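
The hunks above only re-home the multicast group (MID) state under the new bridge struct; the lifetime rule is unchanged: an index is claimed from a bitmap when the group is created, and the entry is freed once the last reference is dropped. A standalone model of that rule, with illustrative names:

    /* Standalone model of the MID lifetime: a bitmap-allocated index plus
     * a reference count; the entry is freed when the last user drops it.
     * Names are illustrative, not the driver's API. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MID_MAX 64

    static uint64_t mid_bitmap; /* one bit per hardware MID index */

    struct mid {
        unsigned int idx;
        unsigned int ref_count;
    };

    static struct mid *mid_get(void)
    {
        struct mid *mid;
        unsigned int idx;

        for (idx = 0; idx < MID_MAX; idx++)
            if (!(mid_bitmap & (1ULL << idx)))
                break;
        if (idx == MID_MAX)
            return NULL; /* out of hardware indexes */

        mid = calloc(1, sizeof(*mid));
        if (!mid)
            return NULL;
        mid_bitmap |= 1ULL << idx;
        mid->idx = idx;
        mid->ref_count = 1;
        return mid;
    }

    /* Returns true when the last reference was dropped and the entry freed. */
    static bool mid_put(struct mid *mid)
    {
        if (--mid->ref_count == 0) {
            mid_bitmap &= ~(1ULL << mid->idx);
            free(mid);
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct mid *mid = mid_get();

        if (!mid)
            return 1;
        mid->ref_count++;                    /* second port joins the group */
        printf("freed: %d\n", mid_put(mid)); /* 0: still referenced */
        printf("freed: %d\n", mid_put(mid)); /* 1: last user gone */
        return 0;
    }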
@@ -1056,17 +1278,34 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
+ u16 fid_index;
int err = 0;
if (switchdev_trans_ph_prepare(trans))
return 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ mdb->vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
+ mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to allocate MC group\n");
return -ENOMEM;
@@ -1082,8 +1321,8 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
}
if (mid->ref_count == 1) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
- true);
+ err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
+ mid->mid, true);
if (err) {
netdev_err(dev, "Unable to set MC SFD\n");
goto err_out;
@@ -1104,15 +1343,8 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- return 0;
-
err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
@@ -1135,68 +1367,78 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
return err;
}
-static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end)
+static void
+mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
- u16 vid, pvid;
-
- if (!mlxsw_sp_port->bridged)
- return -EINVAL;
-
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
- false);
+ u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- pvid = mlxsw_sp_port->pvid;
- if (pvid >= vid_begin && pvid <= vid_end)
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
-
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
-
- mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
-
- /* Changing activity bits only if HW operation succeeded */
- for (vid = vid_begin; vid <= vid_end; vid++)
- clear_bit(vid, mlxsw_sp_port->active_vlans);
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
- return 0;
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
- return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
- vlan->vid_end);
-}
-
-void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
-{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
u16 vid;
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
+
+ return 0;
}
static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb)
{
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
- u16 lag_vid = 0;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = fdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 fid_index, vid;
+
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- }
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ fdb->vid);
+ if (!mlxsw_sp_port_vlan)
+ return 0;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+ vid = mlxsw_sp_port_vlan->vid;
if (!mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
+ return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
mlxsw_sp_port->local_port,
- fdb->addr, fid,
- false, false);
+ fdb->addr, fid_index, false,
+ false);
else
- return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
+ return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
mlxsw_sp_port->lag_id,
- fdb->addr, fid, lag_vid,
+ fdb->addr, fid_index, vid,
false, false);
}
@@ -1204,13 +1446,30 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
- u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
+ u16 fid_index;
u16 mid_idx;
int err = 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
+ bridge_device,
+ mdb->vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
+
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
@@ -1222,8 +1481,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
mid_idx = mid->mid;
if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
- false);
+ err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
+ mid_idx, false);
if (err)
netdev_err(dev, "Unable to remove MC SFD\n");
}
@@ -1237,15 +1496,8 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
- return 0;
-
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
@@ -1284,32 +1536,32 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb,
- struct net_device *orig_dev)
+ switchdev_obj_dump_cb_t *cb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_port *tmp;
- struct mlxsw_sp_fid *f;
- u16 vport_fid;
- char *sfd_pl;
+ struct net_device *orig_dev = fdb->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ u16 lag_id, fid_index;
char mac[ETH_ALEN];
- u16 fid;
- u8 local_port;
- u16 lag_id;
- u8 num_rec;
int stored_err = 0;
- int i;
+ char *sfd_pl;
+ u8 num_rec;
int err;
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_port)
+ return 0;
+
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
if (!sfd_pl)
return -ENOMEM;
- f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
- vport_fid = f ? f->fid : 0;
-
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
do {
+ struct mlxsw_sp_port *tmp;
+ u8 local_port;
+ int i;
+
mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1326,48 +1578,44 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
for (i = 0; i < num_rec; i++) {
switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
case MLXSW_REG_SFD_REC_TYPE_UNICAST:
- mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
+ mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac,
+ &fid_index,
&local_port);
- if (local_port == mlxsw_sp_port->local_port) {
- if (vport_fid && vport_fid == fid)
- fdb->vid = 0;
- else if (!vport_fid &&
- !mlxsw_sp_fid_is_vfid(fid))
- fdb->vid = fid;
- else
- continue;
- ether_addr_copy(fdb->addr, mac);
- fdb->ndm_state = NUD_REACHABLE;
- err = cb(&fdb->obj);
- if (err)
- stored_err = err;
- }
+ if (bridge_port->lagged)
+ continue;
+ if (bridge_port->system_port != local_port)
+ continue;
+ if (bridge_port->bridge_device->vlan_enabled)
+ fdb->vid = fid_index;
+ else
+ fdb->vid = 0;
+ ether_addr_copy(fdb->addr, mac);
+ fdb->ndm_state = NUD_REACHABLE;
+ err = cb(&fdb->obj);
+ if (err)
+ stored_err = err;
break;
case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
- mac, &fid, &lag_id);
+ mac, &fid_index,
+ &lag_id);
+ if (!bridge_port->lagged)
+ continue;
+ if (bridge_port->lag_id != lag_id)
+ continue;
tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
- if (tmp && tmp->local_port ==
- mlxsw_sp_port->local_port) {
- /* LAG records can only point to LAG
- * devices or VLAN devices on top.
- */
- if (!netif_is_lag_master(orig_dev) &&
- !is_vlan_dev(orig_dev))
- continue;
- if (vport_fid && vport_fid == fid)
- fdb->vid = 0;
- else if (!vport_fid &&
- !mlxsw_sp_fid_is_vfid(fid))
- fdb->vid = fid;
- else
- continue;
- ether_addr_copy(fdb->addr, mac);
- fdb->ndm_state = NUD_REACHABLE;
- err = cb(&fdb->obj);
- if (err)
- stored_err = err;
- }
+ if (tmp->local_port !=
+ mlxsw_sp_port->local_port)
+ continue;
+ if (bridge_port->bridge_device->vlan_enabled)
+ fdb->vid = fid_index;
+ else
+ fdb->vid = 0;
+ ether_addr_copy(fdb->addr, mac);
+ fdb->ndm_state = NUD_REACHABLE;
+ err = cb(&fdb->obj);
+ if (err)
+ stored_err = err;
break;
}
}
@@ -1382,28 +1630,32 @@ static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_obj_port_vlan *vlan,
switchdev_obj_dump_cb_t *cb)
{
- u16 vid;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err = 0;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- vlan->flags = 0;
- vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
- return cb(&vlan->obj);
- }
+ bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_port))
+ return -EINVAL;
+
+ if (!bridge_port->bridge_device->vlan_enabled)
+ return 0;
- for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
vlan->flags = 0;
- if (vid == mlxsw_sp_port->pvid)
+ if (bridge_vlan->pvid)
vlan->flags |= BRIDGE_VLAN_INFO_PVID;
- if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
+ if (bridge_vlan->egress_untagged)
vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
- vlan->vid_begin = vid;
- vlan->vid_end = vid;
+ vlan->vid_begin = bridge_vlan->vid;
+ vlan->vid_end = bridge_vlan->vid;
err = cb(&vlan->obj);
if (err)
break;
}
+
return err;
}
@@ -1414,10 +1666,6 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err = 0;
- mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
- if (!mlxsw_sp_port)
- return -EINVAL;
-
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
@@ -1425,8 +1673,7 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_FDB(obj), cb,
- obj->orig_dev);
+ SWITCHDEV_OBJ_PORT_FDB(obj), cb);
break;
default:
err = -EOPNOTSUPP;
@@ -1444,6 +1691,172 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
};
+static int
+mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ if (is_vlan_dev(bridge_port->dev))
+ return -EINVAL;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ /* Let VLAN-aware bridge take care of its own VLANs */
+ mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+
+ return 0;
+}
+
+static void
+mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
+ .port_join = mlxsw_sp_bridge_8021q_port_join,
+ .port_leave = mlxsw_sp_bridge_8021q_port_leave,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+};
+
+static bool
+mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+ list) {
+ if (mlxsw_sp_port_vlan->bridge_port &&
+ mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
+ br_dev)
+ return true;
+ }
+
+ return false;
+}
+
+static int
+mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid;
+
+ if (!is_vlan_dev(bridge_port->dev))
+ return -EINVAL;
+ vid = vlan_dev_vlan_id(bridge_port->dev);
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
+ netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
+ return -EINVAL;
+ }
+
+ /* Port is no longer usable as a router interface */
+ if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+
+ return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
+}
+
+static void
+mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 vid = vlan_dev_vlan_id(bridge_port->dev);
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
+
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
+ .port_join = mlxsw_sp_bridge_8021d_port_join,
+ .port_leave = mlxsw_sp_bridge_8021d_port_leave,
+ .fid_get = mlxsw_sp_bridge_8021d_fid_get,
+};
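+
+/* With both ops tables defined, 802.1Q versus 802.1D behaviour is chosen
+ * once per bridge device, and every subsequent join, leave, and FID lookup
+ * dispatches through the table. A minimal standalone sketch of that
+ * dispatch, with hypothetical types in place of the driver's:
+ *
+ *     #include <stdio.h>
+ *
+ *     struct bridge;
+ *
+ *     struct bridge_ops {
+ *         int (*port_join)(struct bridge *br);
+ *         void (*port_leave)(struct bridge *br);
+ *     };
+ *
+ *     static int q_join(struct bridge *br)   { (void)br; puts("802.1Q join");  return 0; }
+ *     static void q_leave(struct bridge *br) { (void)br; puts("802.1Q leave"); }
+ *     static int d_join(struct bridge *br)   { (void)br; puts("802.1D join");  return 0; }
+ *     static void d_leave(struct bridge *br) { (void)br; puts("802.1D leave"); }
+ *
+ *     static const struct bridge_ops bridge_8021q_ops = { q_join, q_leave };
+ *     static const struct bridge_ops bridge_8021d_ops = { d_join, d_leave };
+ *
+ *     struct bridge {
+ *         const struct bridge_ops *ops; // chosen when the bridge is created
+ *     };
+ *
+ *     int main(void)
+ *     {
+ *         struct bridge vlan_aware = { &bridge_8021q_ops };
+ *         struct bridge vlan_unaware = { &bridge_8021d_ops };
+ *
+ *         vlan_aware.ops->port_join(&vlan_aware);
+ *         vlan_unaware.ops->port_join(&vlan_unaware);
+ *         vlan_aware.ops->port_leave(&vlan_aware);
+ *         vlan_unaware.ops->port_leave(&vlan_unaware);
+ *         return 0;
+ *     }
+ */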
+
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+ int err;
+
+ bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
+ if (IS_ERR(bridge_port))
+ return PTR_ERR(bridge_port);
+ bridge_device = bridge_port->bridge_device;
+
+ err = bridge_device->ops->port_join(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
+ return err;
+}
+
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *brport_dev,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+ bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
+ if (!bridge_port)
+ return;
+
+ bridge_device->ops->port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
+ mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
+}
+
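
mlxsw_sp_port_bridge_join() above takes a reference on the bridge port via _get() and must drop it when the ops callback fails; the single error label keeps acquire and release visibly paired. A standalone sketch of that unwind pattern, with illustrative names:

    /* Standalone model of the get/goto-unwind/put pairing above. */
    #include <errno.h>
    #include <stdio.h>

    static int refs;

    static void ref_get(void) { refs++; }
    static void ref_put(void) { refs--; }

    static int port_join(int should_fail)
    {
        int err;

        ref_get();

        err = should_fail ? -EINVAL : 0; /* the ops->port_join() step */
        if (err)
            goto err_port_join;

        return 0;

    err_port_join:
        ref_put(); /* failure must not leak the reference */
        return err;
    }

    int main(void)
    {
        port_join(1);
        printf("refs after failed join: %d\n", refs); /* 0 */
        return 0;
    }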
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
char *mac, u16 vid,
struct net_device *dev)
@@ -1463,6 +1876,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
char mac[ETH_ALEN];
u8 local_port;
@@ -1477,22 +1893,21 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- if (mlxsw_sp_fid_is_vfid(fid)) {
- struct mlxsw_sp_port *mlxsw_sp_vport;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+ goto just_remove;
+ }
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid);
- if (!mlxsw_sp_vport) {
- netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
- goto just_remove;
- }
- vid = 0;
- /* Override the physical port with the vPort. */
- mlxsw_sp_port = mlxsw_sp_vport;
- } else {
- vid = fid;
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ if (!bridge_port) {
+ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
+ goto just_remove;
}
+ bridge_device = bridge_port->bridge_device;
+ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
adding, true);
@@ -1503,8 +1918,8 @@ do_fdb_op:
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
- adding, mac, vid, mlxsw_sp_port->dev);
+ mlxsw_sp_fdb_call_notifiers(bridge_port->flags & BR_LEARNING_SYNC,
+ adding, mac, vid, bridge_port->dev);
return;
just_remove:
@@ -1517,8 +1932,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index,
bool adding)
{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
- struct net_device *dev;
char mac[ETH_ALEN];
u16 lag_vid = 0;
u16 lag_id;
@@ -1533,26 +1950,22 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- if (mlxsw_sp_fid_is_vfid(fid)) {
- struct mlxsw_sp_port *mlxsw_sp_vport;
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
- fid);
- if (!mlxsw_sp_vport) {
- netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
- goto just_remove;
- }
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+ goto just_remove;
+ }
- lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- dev = mlxsw_sp_vport->dev;
- vid = 0;
- /* Override the physical port with the vPort. */
- mlxsw_sp_port = mlxsw_sp_vport;
- } else {
- dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
- vid = fid;
+ bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ if (!bridge_port) {
+ netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
+ goto just_remove;
}
+ bridge_device = bridge_port->bridge_device;
+ vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
+
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
adding, true);
@@ -1563,8 +1976,8 @@ do_fdb_op:
if (!do_notification)
return;
- mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
- vid, dev);
+ mlxsw_sp_fdb_call_notifiers(bridge_port->flags & BR_LEARNING_SYNC,
+ adding, mac, vid, bridge_port->dev);
return;
just_remove:
@@ -1598,12 +2011,15 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
- msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+
+ mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
+ msecs_to_jiffies(bridge->fdb_notify.interval));
}
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
+ struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp *mlxsw_sp;
char *sfn_pl;
u8 num_rec;
@@ -1614,7 +2030,8 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
if (!sfn_pl)
return;
- mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+ bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
+ mlxsw_sp = bridge->mlxsw_sp;
rtnl_lock();
mlxsw_reg_sfn_pack(sfn_pl);
@@ -1635,6 +2052,7 @@ out:
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
int err;
err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
@@ -1642,25 +2060,42 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
return err;
}
- INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
- mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+ INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+ bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
return 0;
}
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
- cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+ mlxsw_sp->bridge = bridge;
+ bridge->mlxsw_sp = mlxsw_sp;
+
+ INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
+ INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list);
+
+ bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
+ bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
+
return mlxsw_sp_fdb_init(mlxsw_sp);
}
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
+ WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+ WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
+ kfree(mlxsw_sp->bridge);
}
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 6a4310af5d97..50ea69d88480 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3218,6 +3218,7 @@ static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
return -EFAULT;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 4b15f0f496aa..95f6b97b5d71 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -14,12 +14,15 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ nfp_app.o \
+ nfp_devlink.o \
nfp_main.o \
nfp_net_common.o \
nfp_net_ethtool.o \
nfp_net_offload.o \
nfp_net_main.o \
- nfp_netvf_main.o
+ nfp_netvf_main.o \
+ nfp_port.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
new file mode 100644
index 000000000000..59be638bb60e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "nfp_app.h"
+#include "nfp_main.h"
+
+struct nfp_app *nfp_app_alloc(struct nfp_pf *pf)
+{
+ struct nfp_app *app;
+
+ app = kzalloc(sizeof(*app), GFP_KERNEL);
+ if (!app)
+ return ERR_PTR(-ENOMEM);
+
+ app->pf = pf;
+ app->cpp = pf->cpp;
+ app->pdev = pf->pdev;
+
+ return app;
+}
+
+void nfp_app_free(struct nfp_app *app)
+{
+ kfree(app);
+}
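
nfp_app_alloc() reports failure as ERR_PTR(-ENOMEM) rather than NULL, so callers test the pointer with IS_ERR() and recover the error code with PTR_ERR(). A userspace model of that convention (the kernel's own helpers live in linux/err.h):

    /* Userspace model of the kernel's ERR_PTR/IS_ERR convention used
     * by nfp_app_alloc() above. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *app_alloc(void)
    {
        void *app = calloc(1, 16);

        return app ? app : ERR_PTR(-ENOMEM);
    }

    int main(void)
    {
        void *app = app_alloc();

        if (IS_ERR(app)) {
            fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(app));
            return 1;
        }
        free(app);
        return 0;
    }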
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
new file mode 100644
index 000000000000..e63425c02c8d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _NFP_APP_H
+#define _NFP_APP_H 1
+
+struct pci_dev;
+struct nfp_cpp;
+struct nfp_pf;
+
+/**
+ * struct nfp_app - NFP application container
+ * @pdev: backpointer to PCI device
+ * @pf: backpointer to NFP PF structure
+ * @cpp: pointer to the CPP handle
+ */
+struct nfp_app {
+ struct pci_dev *pdev;
+ struct nfp_pf *pf;
+ struct nfp_cpp *cpp;
+};
+
+struct nfp_app *nfp_app_alloc(struct nfp_pf *pf);
+void nfp_app_free(struct nfp_app *app);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
new file mode 100644
index 000000000000..2609a0f28e81
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/rtnetlink.h>
+#include <net/devlink.h>
+
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_port.h"
+
+static int
+nfp_devlink_fill_eth_port(struct nfp_port *port,
+ struct nfp_eth_table_port *copy)
+{
+ struct nfp_eth_table_port *eth_port;
+
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EINVAL;
+
+ memcpy(copy, eth_port, sizeof(*eth_port));
+
+ return 0;
+}
+
+static int
+nfp_devlink_fill_eth_port_from_id(struct nfp_pf *pf, unsigned int port_index,
+ struct nfp_eth_table_port *copy)
+{
+ struct nfp_port *port;
+
+ port = nfp_port_from_id(pf, NFP_PORT_PHYS_PORT, port_index);
+
+ return nfp_devlink_fill_eth_port(port, copy);
+}
+
+static int
+nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes)
+{
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_eth_config_start(pf->cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ ret = __nfp_eth_set_split(nsp, lanes);
+ if (ret) {
+ nfp_eth_config_cleanup_end(nsp);
+ return ret;
+ }
+
+ ret = nfp_eth_config_commit_end(nsp);
+ if (ret < 0)
+ return ret;
+ if (ret) /* no change */
+ return 0;
+
+ return nfp_net_refresh_port_table_sync(pf);
+}
+
+static int
+nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
+ unsigned int count)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ struct nfp_eth_table_port eth_port;
+ int ret;
+
+ if (count < 2)
+ return -EINVAL;
+
+ mutex_lock(&pf->lock);
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ goto out;
+
+ if (eth_port.is_split || eth_port.port_lanes % count) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index,
+ eth_port.port_lanes / count);
+out:
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
+static int
+nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
+{
+ struct nfp_pf *pf = devlink_priv(devlink);
+ struct nfp_eth_table_port eth_port;
+ int ret;
+
+ mutex_lock(&pf->lock);
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ goto out;
+
+ if (!eth_port.is_split) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes);
+out:
+ mutex_unlock(&pf->lock);
+
+ return ret;
+}
+
+const struct devlink_ops nfp_devlink_ops = {
+ .port_split = nfp_devlink_port_split,
+ .port_unsplit = nfp_devlink_port_unsplit,
+};
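
Both callbacks reduce to one arithmetic rule: a port may be split count ways only if it is not already split and its lane count divides evenly, in which case each resulting port gets port_lanes / count lanes. A standalone check of that rule, with illustrative names:

    /* Standalone model of the split validation above. */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int split_lanes(unsigned int port_lanes, bool is_split,
                           unsigned int count, unsigned int *lanes)
    {
        if (count < 2)
            return -EINVAL;
        if (is_split || port_lanes % count)
            return -EINVAL;
        *lanes = port_lanes / count;
        return 0;
    }

    int main(void)
    {
        unsigned int lanes;

        if (!split_lanes(4, false, 4, &lanes))
            printf("4-lane port split 4 ways -> %u lane(s) each\n", lanes);
        return 0;
    }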
+
+int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
+{
+ struct nfp_eth_table_port eth_port;
+ struct devlink *devlink;
+ int ret;
+
+ rtnl_lock();
+ ret = nfp_devlink_fill_eth_port(port, &eth_port);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ devlink_port_type_eth_set(&port->dl_port, port->netdev);
+ if (eth_port.is_split)
+ devlink_port_split_set(&port->dl_port, eth_port.label_port);
+
+ devlink = priv_to_devlink(app->pf);
+
+ return devlink_port_register(devlink, &port->dl_port, port->eth_id);
+}
+
+void nfp_devlink_port_unregister(struct nfp_port *port)
+{
+ devlink_port_unregister(&port->dl_port);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index dde35dae35c5..f22f56c9218f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -41,9 +41,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vermagic.h>
+#include <net/devlink.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
@@ -315,6 +317,7 @@ static void nfp_fw_unload(struct nfp_pf *pf)
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ struct devlink *devlink;
struct nfp_pf *pf;
int err;
@@ -335,12 +338,15 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_pci_disable;
}
- pf = kzalloc(sizeof(*pf), GFP_KERNEL);
- if (!pf) {
+ devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf));
+ if (!devlink) {
err = -ENOMEM;
goto err_rel_regions;
}
+ pf = devlink_priv(devlink);
+ INIT_LIST_HEAD(&pf->vnics);
INIT_LIST_HEAD(&pf->ports);
+ mutex_init(&pf->lock);
pci_set_drvdata(pdev, pf);
pf->pdev = pdev;
@@ -359,10 +365,14 @@ static int nfp_pci_probe(struct pci_dev *pdev,
nfp_hwinfo_lookup(pf->cpp, "assembly.revision"),
nfp_hwinfo_lookup(pf->cpp, "cpld.version"));
- err = nfp_nsp_init(pdev, pf);
+ err = devlink_register(devlink, &pdev->dev);
if (err)
goto err_cpp_free;
+ err = nfp_nsp_init(pdev, pf);
+ if (err)
+ goto err_devlink_unreg;
+
nfp_pcie_sriov_read_nfd_limit(pf);
err = nfp_net_pci_probe(pf);
@@ -375,11 +385,14 @@ err_fw_unload:
if (pf->fw_loaded)
nfp_fw_unload(pf);
kfree(pf->eth_tbl);
+err_devlink_unreg:
+ devlink_unregister(devlink);
err_cpp_free:
nfp_cpp_free(pf->cpp);
err_disable_msix:
pci_set_drvdata(pdev, NULL);
- kfree(pf);
+ mutex_destroy(&pf->lock);
+ devlink_free(devlink);
err_rel_regions:
pci_release_regions(pdev);
err_pci_disable:
@@ -391,11 +404,16 @@ err_pci_disable:
static void nfp_pci_remove(struct pci_dev *pdev)
{
struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(pf);
nfp_net_pci_remove(pf);
nfp_pcie_sriov_disable(pdev);
+ devlink_unregister(devlink);
+
if (pf->fw_loaded)
nfp_fw_unload(pf);
@@ -403,7 +421,8 @@ static void nfp_pci_remove(struct pci_dev *pdev)
nfp_cpp_free(pf->cpp);
kfree(pf->eth_tbl);
- kfree(pf);
+ mutex_destroy(&pf->lock);
+ devlink_free(devlink);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index b57de047b002..526db8029dea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -47,6 +47,7 @@
#include <linux/workqueue.h>
struct dentry;
+struct devlink_ops;
struct pci_dev;
struct nfp_cpp;
@@ -57,27 +58,31 @@ struct nfp_eth_table;
* struct nfp_pf - NFP PF-specific device structure
* @pdev: Backpointer to PCI device
* @cpp: Pointer to the CPP handle
- * @ctrl_area: Pointer to the CPP area for the control BAR
+ * @app: Pointer to the APP handle
+ * @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
* @tx_area: Pointer to the CPP area for the TX queues
* @rx_area: Pointer to the CPP area for the FL/RX queues
- * @irq_entries: Array of MSI-X entries for all ports
+ * @irq_entries: Array of MSI-X entries for all vNICs
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
* @fw_loaded: Is the firmware loaded?
* @eth_tbl: NSP ETH table
* @ddir: Per-device debugfs directory
- * @num_ports: Number of adapter ports app firmware supports
- * @num_netdevs: Number of netdevs spawned
- * @ports: Linked list of port structures (struct nfp_net)
- * @port_lock: Protects @ports, @num_ports, @num_netdevs
+ * @max_data_vnics: Number of data vNICs app firmware supports
+ * @num_vnics: Number of vNICs spawned
+ * @vnics: Linked list of vNIC structures (struct nfp_net)
+ * @ports: Linked list of port structures (struct nfp_port)
* @port_refresh_work: Work entry for taking netdevs out
+ * @lock: Protects all fields which may change after probe
*/
struct nfp_pf {
struct pci_dev *pdev;
struct nfp_cpp *cpp;
- struct nfp_cpp_area *ctrl_area;
+ struct nfp_app *app;
+
+ struct nfp_cpp_area *data_vnic_bar;
struct nfp_cpp_area *tx_area;
struct nfp_cpp_area *rx_area;
@@ -92,16 +97,19 @@ struct nfp_pf {
struct dentry *ddir;
- unsigned int num_ports;
- unsigned int num_netdevs;
+ unsigned int max_data_vnics;
+ unsigned int num_vnics;
+ struct list_head vnics;
struct list_head ports;
struct work_struct port_refresh_work;
- struct mutex port_lock;
+ struct mutex lock;
};
extern struct pci_driver nfp_netvf_pci_driver;
+extern const struct devlink_ops nfp_devlink_ops;
+
int nfp_net_pci_probe(struct nfp_pf *pf);
void nfp_net_pci_remove(struct nfp_pf *pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index fcf81b3be830..7882d2604835 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -84,7 +84,7 @@
#define NFP_NET_NON_Q_VECTORS 2
#define NFP_NET_IRQ_LSC_IDX 0
#define NFP_NET_IRQ_EXN_IDX 1
-#define NFP_NET_MIN_PORT_IRQS (NFP_NET_NON_Q_VECTORS + 1)
+#define NFP_NET_MIN_VNIC_IRQS (NFP_NET_NON_Q_VECTORS + 1)
/* Queue/Ring definitions */
#define NFP_NET_MAX_TX_RINGS 64 /* Max. # of Tx rings per device */
@@ -102,6 +102,7 @@
#define NFP_NET_RX_DESCS_DEFAULT 4096 /* Default # of Rx descs per ring */
#define NFP_NET_FL_BATCH 16 /* Add freelist in this Batch size */
+#define NFP_NET_XDP_MAX_COMPLETE 2048 /* XDP bufs to reclaim in NAPI poll */
/* Offload definitions */
#define NFP_NET_N_VXLAN_PORTS (NFP_NET_CFG_VXLAN_SZ / sizeof(__be16))
@@ -115,6 +116,10 @@ struct nfp_cpp;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
+struct nfp_port;
+
+/* Convenience macro for wrapping descriptor index on ring size */
+#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
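+
+/* D_IDX() wraps a free-running index onto the ring: ring sizes are powers
+ * of two, so masking with cnt - 1 equals idx % cnt without the division.
+ * A standalone check (the driver's macro takes the ring struct and reads
+ * cnt from it; this model takes cnt directly):
+ *
+ *     #include <assert.h>
+ *     #include <stdio.h>
+ *
+ *     #define D_IDX(cnt, idx) ((idx) & ((cnt) - 1u))
+ *
+ *     int main(void)
+ *     {
+ *         unsigned int cnt = 8; // must be a power of two
+ *         unsigned int idx;
+ *
+ *         for (idx = 0; idx < 20; idx++)
+ *             assert(D_IDX(cnt, idx) == idx % cnt);
+ *         printf("D_IDX(8, 13) = %u\n", D_IDX(cnt, 13)); // 5
+ *         return 0;
+ *     }
+ */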
/* Convenience macro for writing dma address into RX/TX descriptors */
#define nfp_desc_set_dma_addr(desc, dma_addr) \
@@ -153,10 +158,15 @@ struct nfp_net_tx_desc {
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
__le16 mss; /* MSS to be used for LSO */
- u8 l4_offset; /* LSO, where the L4 data starts */
+ u8 lso_hdrlen; /* LSO, TCP payload offset */
u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */
-
- __le16 vlan; /* VLAN tag to add if indicated */
+ union {
+ struct {
+ u8 l3_offset; /* L3 header offset */
+ u8 l4_offset; /* L4 header offset */
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
__le16 data_len; /* Length of frame + meta data */
} __packed;
__le32 vals[4];
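
The new union reuses the same 16 descriptor bits for either a VLAN tag or the LSO L3/L4 header offsets; the TX flags say which interpretation applies, which is why the xmit path later in this patch must fill the TSO fields before the VLAN field. A model with plain integer types standing in for the kernel's u8/__le16 (C11 for the anonymous struct):

    /* Model of the descriptor union above: one 16-bit word carries either
     * the VLAN tag or the L3/L4 header offsets, selected by the TX flags. */
    #include <stdint.h>
    #include <stdio.h>

    union tx_offsets_or_vlan {
        struct {
            uint8_t l3_offset; /* LSO: L3 header offset */
            uint8_t l4_offset; /* LSO: L4 header offset */
        };
        uint16_t vlan;         /* non-LSO: VLAN tag to insert */
    };

    int main(void)
    {
        union tx_offsets_or_vlan w = { .vlan = 0 };

        w.l3_offset = 14; /* Ethernet header length */
        w.l4_offset = 34; /* Ethernet + IPv4 */
        printf("raw word: 0x%04x\n", w.vlan); /* aliased view */
        return 0;
    }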
@@ -287,9 +297,11 @@ struct nfp_net_rx_desc {
#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
struct nfp_meta_parsed {
- u32 hash_type;
+ u8 hash_type;
+ u8 csum_type;
u32 hash;
u32 mark;
+ __wsum csum;
};
struct nfp_net_rx_hash {
@@ -531,7 +543,6 @@ struct nfp_net_dp {
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
* @link_up: Is the link up?
- * @link_changed: Has link state changes since last port refresh?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
* @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
@@ -544,10 +555,10 @@ struct nfp_net_dp {
* @rx_bar: Pointer to mapped FL/RX queues
* @debugfs_dir: Device directory in debugfs
* @ethtool_dump_flag: Ethtool dump flag
- * @port_list: Entry on device port list
+ * @vnic_list: Entry on device vNIC list
* @pdev: Backpointer to PCI device
- * @cpp: CPP device handle if available
- * @eth_port: Translated ETH Table port entry
+ * @app: APP handle if available
+ * @port: Pointer to nfp_port structure if vNIC is a port
*/
struct nfp_net {
struct nfp_net_dp dp;
@@ -589,7 +600,6 @@ struct nfp_net {
u32 me_freq_mhz;
bool link_up;
- bool link_changed;
spinlock_t link_status_lock;
spinlock_t reconfig_lock;
@@ -614,12 +624,12 @@ struct nfp_net {
struct dentry *debugfs_dir;
u32 ethtool_dump_flag;
- struct list_head port_list;
+ struct list_head vnic_list;
struct pci_dev *pdev;
- struct nfp_cpp *cpp;
+ struct nfp_app *app;
- struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
};
/* Functions to read/write from/to a BAR
@@ -791,16 +801,25 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
/* Globals */
extern const char nfp_driver_version[];
+extern const struct net_device_ops nfp_net_netdev_ops;
+
+static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
+{
+ return netdev->netdev_ops == &nfp_net_netdev_ops;
+}
+
/* Prototypes */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_netdev_alloc(struct pci_dev *pdev,
- unsigned int max_tx_rings, unsigned int max_rx_rings);
-void nfp_net_netdev_free(struct nfp_net *nn);
-int nfp_net_netdev_init(struct net_device *netdev);
-void nfp_net_netdev_clean(struct net_device *netdev);
+nfp_net_alloc(struct pci_dev *pdev,
+ unsigned int max_tx_rings, unsigned int max_rx_rings);
+void nfp_net_free(struct nfp_net *nn);
+
+int nfp_net_init(struct nfp_net *nn);
+void nfp_net_clean(struct nfp_net *nn);
+
void nfp_net_set_ethtool_ops(struct net_device *netdev);
void nfp_net_info(struct nfp_net *nn);
int nfp_net_reconfig(struct nfp_net *nn, u32 update);
@@ -821,15 +840,11 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
struct netlink_ext_ack *extack);
-bool nfp_net_link_changed_read_clear(struct nfp_net *nn);
-int nfp_net_refresh_eth_port(struct nfp_net *nn);
-void nfp_net_refresh_port_table(struct nfp_net *nn);
-
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
-void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id);
void nfp_net_debugfs_dir_clean(struct dentry **dir);
#else
static inline void nfp_net_debugfs_create(void)
@@ -846,7 +861,7 @@ static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
}
static inline void
-nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
+nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 82bd6b0935f1..b3f5c8af6789 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -70,6 +70,7 @@
#include "nfpcore/nfp_nsp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_port.h"
/**
* nfp_net_get_fw_version() - Read and parse the FW version
@@ -391,19 +392,6 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
return IRQ_HANDLED;
}
-bool nfp_net_link_changed_read_clear(struct nfp_net *nn)
-{
- unsigned long flags;
- bool ret;
-
- spin_lock_irqsave(&nn->link_status_lock, flags);
- ret = nn->link_changed;
- nn->link_changed = false;
- spin_unlock_irqrestore(&nn->link_status_lock, flags);
-
- return ret;
-}
-
/**
* nfp_net_read_link_status() - Reread link status from control BAR
* @nn: NFP Network structure
@@ -423,7 +411,8 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
goto out;
nn->link_up = link_up;
- nn->link_changed = true;
+ if (nn->port)
+ set_bit(NFP_PORT_CHANGED, &nn->port->flags);
if (nn->link_up) {
netif_carrier_on(nn->dp.netdev);
@@ -516,11 +505,10 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
/**
* nfp_net_vecs_init() - Assign IRQs and setup rvecs.
- * @netdev: netdev structure
+ * @nn: NFP Network structure
*/
-static void nfp_net_vecs_init(struct net_device *netdev)
+static void nfp_net_vecs_init(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_r_vector *r_vec;
int r;
@@ -667,17 +655,22 @@ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
if (!skb_is_gso(skb))
return;
- if (!skb->encapsulation)
+ if (!skb->encapsulation) {
+ txd->l3_offset = skb_network_offset(skb);
+ txd->l4_offset = skb_transport_offset(skb);
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
- else
+ } else {
+ txd->l3_offset = skb_inner_network_offset(skb);
+ txd->l4_offset = skb_inner_transport_offset(skb);
hdrlen = skb_inner_transport_header(skb) - skb->data +
inner_tcp_hdrlen(skb);
+ }
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
- txd->l4_offset = hdrlen;
+ txd->lso_hdrlen = hdrlen;
txd->mss = cpu_to_le16(mss);
txd->flags |= PCIE_DESC_TX_LSO;
@@ -804,7 +797,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (dma_mapping_error(dp->dev, dma_addr))
goto err_free;
- wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
@@ -823,12 +816,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
txd->flags = 0;
txd->mss = 0;
- txd->l4_offset = 0;
+ txd->lso_hdrlen = 0;
+ /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
nfp_net_tx_tso(r_vec, txbuf, txd, skb);
-
nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
-
if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN;
txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
@@ -848,7 +840,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
if (dma_mapping_error(dp->dev, dma_addr))
goto err_unmap;
- wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, wr_idx + 1);
tx_ring->txbufs[wr_idx].skb = skb;
tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
tx_ring->txbufs[wr_idx].fidx = f;
@@ -936,14 +928,10 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
if (qcp_rd_p == tx_ring->qcp_rd_p)
return;
- if (qcp_rd_p > tx_ring->qcp_rd_p)
- todo = qcp_rd_p - tx_ring->qcp_rd_p;
- else
- todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+ todo = D_IDX(tx_ring, qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p);
while (todo--) {
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
- tx_ring->rd_p++;
+ idx = D_IDX(tx_ring, tx_ring->rd_p++);
skb = tx_ring->txbufs[idx].skb;
if (!skb)
@@ -997,45 +985,45 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
-static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
+static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
u32 done_pkts = 0, done_bytes = 0;
+ bool done_all;
int idx, todo;
u32 qcp_rd_p;
- if (tx_ring->wr_p == tx_ring->rd_p)
- return;
-
/* Work out how many descriptors have been transmitted */
qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
if (qcp_rd_p == tx_ring->qcp_rd_p)
- return;
+ return true;
- if (qcp_rd_p > tx_ring->qcp_rd_p)
- todo = qcp_rd_p - tx_ring->qcp_rd_p;
- else
- todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
+ todo = D_IDX(tx_ring, qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p);
+
+ done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
+ todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
+
+ tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
done_pkts = todo;
while (todo--) {
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_ring->rd_p++;
done_bytes += tx_ring->txbufs[idx].real_len;
}
- tx_ring->qcp_rd_p = qcp_rd_p;
-
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_bytes += done_bytes;
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
- "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
+ "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
+
+ return done_all;
}
/**
@@ -1056,7 +1044,7 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
struct sk_buff *skb;
int idx, nr_frags;
- idx = tx_ring->rd_p & (tx_ring->cnt - 1);
+ idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_buf = &tx_ring->txbufs[idx];
skb = tx_ring->txbufs[idx].skb;
@@ -1209,7 +1197,7 @@ static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
{
unsigned int wr_idx;
- wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
nfp_net_dma_sync_dev_rx(dp, dma_addr);
@@ -1247,7 +1235,7 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
unsigned int wr_idx, last_idx;
/* Move the empty entry to the end of the list */
- wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
+ wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
last_idx = rx_ring->cnt - 1;
rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
@@ -1350,17 +1338,28 @@ static int nfp_net_rx_csum_has_errors(u16 flags)
* @dp: NFP Net data path struct
* @r_vec: per-ring structure
* @rxd: Pointer to RX descriptor
+ * @meta: Parsed metadata prepend
* @skb: Pointer to SKB
*/
static void nfp_net_rx_csum(struct nfp_net_dp *dp,
struct nfp_net_r_vector *r_vec,
- struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
+ struct nfp_net_rx_desc *rxd,
+ struct nfp_meta_parsed *meta, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
if (!(dp->netdev->features & NETIF_F_RXCSUM))
return;
+ if (meta->csum_type) {
+ skb->ip_summed = meta->csum_type;
+ skb->csum = meta->csum;
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->hw_csum_rx_ok++;
+ u64_stats_update_end(&r_vec->rx_sync);
+ return;
+ }
+
if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->hw_csum_rx_error++;
@@ -1445,6 +1444,12 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
+ case NFP_NET_META_CSUM:
+ meta->csum_type = CHECKSUM_COMPLETE;
+ meta->csum =
+ (__force __wsum)__get_unaligned_cpu32(data);
+ data += 4;
+ break;
default:
return NULL;
}
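
The new CSUM case slots into the existing metadata walk: the first word of the prepend packs 4-bit type fields, and each field consumes one 32-bit value in order. A standalone model of that walk; the type values here are illustrative, not the NFP_NET_META_* encoding:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define META_FIELD_SIZE 4
    #define META_FIELD_MASK ((1u << META_FIELD_SIZE) - 1)

    enum { META_MARK = 2, META_CSUM = 3 }; /* illustrative values */

    static int parse_meta(uint32_t types, const uint32_t *data)
    {
        while (types) {
            switch (types & META_FIELD_MASK) {
            case META_MARK:
                printf("mark=%" PRIu32 "\n", *data++);
                break;
            case META_CSUM:
                printf("csum=0x%08" PRIx32 "\n", *data++);
                break;
            default:
                return -1; /* unknown field: reject the prepend */
            }
            types >>= META_FIELD_SIZE;
        }
        return 0;
    }

    int main(void)
    {
        uint32_t data[] = { 42, 0xdeadbeef };

        return parse_meta((META_CSUM << META_FIELD_SIZE) | META_MARK, data);
    }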
@@ -1479,18 +1484,26 @@ static bool
nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
- unsigned int pkt_len)
+ unsigned int pkt_len, bool *completed)
{
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
int wr_idx;
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
- nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return false;
+ if (!*completed) {
+ nfp_net_xdp_complete(tx_ring);
+ *completed = true;
+ }
+
+ if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
+ nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
+ NULL);
+ return false;
+ }
}
- wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
+ wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
@@ -1515,7 +1528,7 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
txd->flags = 0;
txd->mss = 0;
- txd->l4_offset = 0;
+ txd->lso_hdrlen = 0;
tx_ring->wr_p++;
tx_ring->wr_ptr_add++;
@@ -1559,6 +1572,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
struct nfp_net_tx_ring *tx_ring;
struct bpf_prog *xdp_prog;
+ bool xdp_tx_cmpl = false;
unsigned int true_bufsz;
struct sk_buff *skb;
int pkts_polled = 0;
@@ -1577,7 +1591,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
dma_addr_t new_dma_addr;
void *new_frag;
- idx = rx_ring->rd_p & (rx_ring->cnt - 1);
+ idx = D_IDX(rx_ring, rx_ring->rd_p);
rxd = &rx_ring->rxds[idx];
if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
@@ -1669,7 +1683,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
tx_ring, rxbuf,
dma_off,
- pkt_len)))
+ pkt_len,
+ &xdp_tx_cmpl)))
trace_xdp_exception(dp->netdev,
xdp_prog, act);
continue;
@@ -1708,7 +1723,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, dp->netdev);
- nfp_net_rx_csum(dp, r_vec, rxd, skb);
+ nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -1717,8 +1732,14 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}
- if (xdp_prog && tx_ring->wr_ptr_add)
- nfp_net_tx_xmit_more_flush(tx_ring);
+ if (xdp_prog) {
+ if (tx_ring->wr_ptr_add)
+ nfp_net_tx_xmit_more_flush(tx_ring);
+ else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
+ !xdp_tx_cmpl)
+ if (!nfp_net_xdp_complete(tx_ring))
+ pkts_polled = budget;
+ }
rcu_read_unlock();
return pkts_polled;
@@ -1739,11 +1760,8 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (r_vec->tx_ring)
nfp_net_tx_complete(r_vec->tx_ring);
- if (r_vec->rx_ring) {
+ if (r_vec->rx_ring)
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
- if (r_vec->xdp_ring)
- nfp_net_xdp_complete(r_vec->xdp_ring);
- }
if (pkts_polled < budget)
if (napi_complete_done(napi, pkts_polled))
@@ -2197,17 +2215,15 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
new_ctrl = nn->dp.ctrl;
- if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_write_key(nn);
nfp_net_rss_write_itbl(nn);
nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
update |= NFP_NET_CFG_UPDATE_RSS;
}
- if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
+ if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
nfp_net_coalesce_write_cfg(nn);
-
- new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
@@ -2710,9 +2726,9 @@ static int nfp_net_set_features(struct net_device *netdev,
if (changed & NETIF_F_RXCSUM) {
if (features & NETIF_F_RXCSUM)
- new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
}
if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
@@ -2724,9 +2740,10 @@ static int nfp_net_set_features(struct net_device *netdev,
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
if (features & (NETIF_F_TSO | NETIF_F_TSO6))
- new_ctrl |= NFP_NET_CFG_CTRL_LSO;
+ new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
+ NFP_NET_CFG_CTRL_LSO;
else
- new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
+ new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
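
[Editor's note] The `x ?: y` form above is the GCC conditional-with-omitted-operand extension: it yields x when x is non-zero and y otherwise, evaluating x only once. The hunk uses it to prefer the LSO2 capability bit over the v1 bit:

/* new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: NFP_NET_CFG_CTRL_LSO;
 * is equivalent to:
 */
if (nn->cap & NFP_NET_CFG_CTRL_LSO2)
	new_ctrl |= NFP_NET_CFG_CTRL_LSO2;
else
	new_ctrl |= NFP_NET_CFG_CTRL_LSO;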
@@ -2818,26 +2835,6 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
-static int
-nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- int err;
-
- if (!nn->eth_port)
- return -EOPNOTSUPP;
-
- if (!nn->eth_port->is_split)
- err = snprintf(name, len, "p%d", nn->eth_port->label_port);
- else
- err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
- nn->eth_port->label_subport);
- if (err >= len)
- return -EINVAL;
-
- return 0;
-}
-
/**
* nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
* @nn: NFP Net device to reconfigure
@@ -3000,7 +2997,7 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
}
}
-static const struct net_device_ops nfp_net_netdev_ops = {
+const struct net_device_ops nfp_net_netdev_ops = {
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
@@ -3012,7 +3009,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
- .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
+ .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
@@ -3032,7 +3029,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3043,18 +3040,22 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
- nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
- nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
+ nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
+ nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
- nfp_net_ebpf_capable(nn) ? "BPF " : "");
+ nfp_net_ebpf_capable(nn) ? "BPF " : "",
+ nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
+ "RXCSUM_COMPLETE " : "");
}
/**
- * nfp_net_netdev_alloc() - Allocate netdev and related structure
+ * nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
* @max_tx_rings: Maximum number of TX rings supported by device
* @max_rx_rings: Maximum number of RX rings supported by device
@@ -3064,9 +3065,9 @@ void nfp_net_info(struct nfp_net *nn)
*
* Return: NFP Net device structure, or ERR_PTR on error.
*/
-struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
- unsigned int max_tx_rings,
- unsigned int max_rx_rings)
+struct nfp_net *nfp_net_alloc(struct pci_dev *pdev,
+ unsigned int max_tx_rings,
+ unsigned int max_rx_rings)
{
struct net_device *netdev;
struct nfp_net *nn;
@@ -3111,10 +3112,10 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
}
/**
- * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
+ * nfp_net_free() - Undo what @nfp_net_alloc() did
* @nn: NFP Net device to reconfigure
*/
-void nfp_net_netdev_free(struct nfp_net *nn)
+void nfp_net_free(struct nfp_net *nn)
{
free_netdev(nn->dp.netdev);
}
@@ -3188,24 +3189,28 @@ static void nfp_net_irqmod_init(struct nfp_net *nn)
}
/**
- * nfp_net_netdev_init() - Initialise/finalise the netdev structure
- * @netdev: netdev structure
+ * nfp_net_init() - Initialise/finalise the nfp_net structure
+ * @nn: NFP Net device structure
*
* Return: 0 on success or negative errno on error.
*/
-int nfp_net_netdev_init(struct net_device *netdev)
+int nfp_net_init(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct net_device *netdev = nn->dp.netdev;
int err;
- nn->dp.chained_metadata_format = nn->fw_ver.major > 3;
-
nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
/* Get some of the read-only fields from the BAR */
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
+ /* Chained metadata is signalled by capabilities except in version 4 */
+ nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
+ nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
+ if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
+ nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
+
nfp_net_write_mac_addr(nn);
/* Determine RX packet/metadata boundary offset */
@@ -3237,9 +3242,9 @@ int nfp_net_netdev_init(struct net_device *netdev)
* supported. By default we enable most features.
*/
netdev->hw_features = NETIF_F_HIGHDMA;
- if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
netdev->hw_features |= NETIF_F_RXCSUM;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -3249,14 +3254,17 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_SG;
nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
}
- if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
+ if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
+ nn->cap & NFP_NET_CFG_CTRL_LSO2) {
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
+ NFP_NET_CFG_CTRL_LSO;
}
- if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
+ if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
netdev->hw_features |= NETIF_F_RXHASH;
nfp_net_rss_init(nn);
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
+ nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
+ NFP_NET_CFG_CTRL_RSS;
}
if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
@@ -3275,8 +3283,12 @@ int nfp_net_netdev_init(struct net_device *netdev)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
+ nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
+ } else {
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+ }
}
netdev->features = netdev->hw_features;
@@ -3286,6 +3298,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
/* Allow L2 Broadcast and Multicast through by default, if supported */
if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
@@ -3322,19 +3335,17 @@ int nfp_net_netdev_init(struct net_device *netdev)
netif_carrier_off(netdev);
nfp_net_set_ethtool_ops(netdev);
- nfp_net_vecs_init(netdev);
+ nfp_net_vecs_init(nn);
return register_netdev(netdev);
}
/**
- * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
- * @netdev: netdev structure
+ * nfp_net_clean() - Undo what nfp_net_init() did.
+ * @nn: NFP Net device structure
*/
-void nfp_net_netdev_clean(struct net_device *netdev)
+void nfp_net_clean(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
-
unregister_netdev(nn->dp.netdev);
if (nn->dp.xdp_prog)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index d04ccc9f6116..df75b8dc3617 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -71,6 +71,7 @@
#define NFP_NET_META_FIELD_SIZE 4
#define NFP_NET_META_HASH 1 /* next field carries hash type */
#define NFP_NET_META_MARK 2
+#define NFP_NET_META_CSUM 6 /* checksum complete type */
/**
* Hash type pre-pended when a RSS hash was computed
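
[Editor's note] With chained metadata, the 32-bit word prepended to the packet packs up to eight 4-bit type tags (NFP_NET_META_FIELD_SIZE); each non-zero tag announces one metadata field that follows, NFP_NET_META_CSUM being the new checksum-complete field. A simplified parse loop, a sketch only (the driver's real parser also handles the hash sub-type and field lengths):

static void *parse_chained_meta(void *data)
{
	u32 meta_info = get_unaligned_be32(data);	/* <asm/unaligned.h> */

	data += 4;
	while (meta_info) {
		switch (meta_info & ((1 << NFP_NET_META_FIELD_SIZE) - 1)) {
		case NFP_NET_META_HASH:
			/* followed by a hash-type tag and the hash value */
			break;
		case NFP_NET_META_CSUM:
			/* 32-bit checksum-complete value */
			break;
		default:
			return NULL;	/* unknown tag: drop the frame */
		}
		data += 4;
		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}
	return data;	/* start of the packet proper */
}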
@@ -119,9 +120,9 @@
#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */
#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */
#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
-#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO */
+#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
-#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS */
+#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
@@ -131,6 +132,19 @@
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
+#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
+#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
+#define NFP_NET_CFG_CTRL_CSUM_COMPLETE (0x1 << 30) /* Checksum complete */
+
+#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | \
+ NFP_NET_CFG_CTRL_LSO2)
+#define NFP_NET_CFG_CTRL_RSS_ANY (NFP_NET_CFG_CTRL_RSS | \
+ NFP_NET_CFG_CTRL_RSS2)
+#define NFP_NET_CFG_CTRL_RXCSUM_ANY (NFP_NET_CFG_CTRL_RXCSUM | \
+ NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+#define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \
+ NFP_NET_CFG_CTRL_CSUM_COMPLETE)
+
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
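
[Editor's note] The *_ANY masks let one bitwise test cover both generations of a capability, which is exactly how the feature hunks earlier in this patch use them:

if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)		/* RSS1 or RSS2 */
	netdev->hw_features |= NETIF_F_RXHASH;

nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;	/* clears whichever LSO was set */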
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 4077c59bf782..6cf1b234eecd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -200,7 +200,7 @@ static const struct file_operations nfp_xdp_q_fops = {
.llseek = seq_lseek
};
-void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
+void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
{
struct dentry *queues, *tx, *rx, *xdp;
char name[20];
@@ -209,7 +209,7 @@ void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
if (IS_ERR_OR_NULL(nfp_dir))
return;
- sprintf(name, "port%d", id);
+ sprintf(name, "vnic%d", id);
nn->debugfs_dir = debugfs_create_dir(name, ddir);
if (IS_ERR_OR_NULL(nn->debugfs_dir))
return;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index abbb47e60cc3..84fdbc4b835b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -50,8 +50,10 @@
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
+#include "nfp_port.h"
enum nfp_dump_diag {
NFP_DUMP_NSP_DIAG = 0,
@@ -134,14 +136,14 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
-static void nfp_net_get_nspinfo(struct nfp_net *nn, char *version)
+static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
struct nfp_nsp *nsp;
- if (!nn->cpp)
+ if (!app)
return;
- nsp = nfp_nsp_open(nn->cpp);
+ nsp = nfp_nsp_open(app->cpp);
if (IS_ERR(nsp))
return;
@@ -162,7 +164,7 @@ static void nfp_net_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->driver));
strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
- nfp_net_get_nspinfo(nn, nsp_version);
+ nfp_net_get_nspinfo(nn->app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d.%d %s",
nn->fw_ver.resv, nn->fw_ver.class,
@@ -195,37 +197,38 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
[NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
[NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
};
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_net *nn;
u32 sts, ls;
+ /* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
- if (nn->eth_port)
- cmd->base.autoneg = nn->eth_port->aneg != NFP_ANEG_DISABLED ?
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (eth_port)
+ cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
if (!netif_carrier_ok(netdev))
return 0;
/* Use link speed from ETH table if available, otherwise try the BAR */
- if (nn->eth_port) {
- int err;
-
- if (nfp_net_link_changed_read_clear(nn)) {
- err = nfp_net_refresh_eth_port(nn);
- if (err)
- return err;
- }
-
- cmd->base.port = nn->eth_port->port_type;
- cmd->base.speed = nn->eth_port->speed;
+ if (eth_port) {
+ cmd->base.port = eth_port->port_type;
+ cmd->base.speed = eth_port->speed;
cmd->base.duplex = DUPLEX_FULL;
return 0;
}
+ if (!nfp_netdev_is_nfp_net(netdev))
+ return -EOPNOTSUPP;
+ nn = netdev_priv(netdev);
+
sts = nn_readl(nn, NFP_NET_CFG_STS);
ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
@@ -246,19 +249,22 @@ static int
nfp_net_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
struct nfp_nsp *nsp;
int err;
- if (!nn->eth_port)
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
return -EOPNOTSUPP;
if (netif_running(netdev)) {
- nn_warn(nn, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
+ netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
return -EBUSY;
}
- nsp = nfp_eth_config_start(nn->cpp, nn->eth_port->index);
+ nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
if (IS_ERR(nsp))
return PTR_ERR(nsp);
@@ -267,7 +273,7 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (err)
goto err_bad_set;
if (cmd->base.speed != SPEED_UNKNOWN) {
- u32 speed = cmd->base.speed / nn->eth_port->lanes;
+ u32 speed = cmd->base.speed / eth_port->lanes;
err = __nfp_eth_set_speed(nsp, speed);
if (err)
@@ -278,7 +284,7 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (err > 0)
return 0; /* no change */
- nfp_net_refresh_port_table(nn);
+ nfp_net_refresh_port_table(port);
return err;
@@ -496,7 +502,7 @@ static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
cmd->data = 0;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
@@ -533,7 +539,7 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
u32 nfp_rss_flag;
int err;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
/* RSS only supports IP SA/DA and L4 src/dst ports */
@@ -595,7 +601,7 @@ static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return 0;
return ARRAY_SIZE(nn->rss_itbl);
@@ -605,7 +611,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
return nfp_net_rss_key_sz(nn);
@@ -617,7 +623,7 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
struct nfp_net *nn = netdev_priv(netdev);
int i;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
if (indir)
@@ -641,7 +647,7 @@ static int nfp_net_set_rxfh(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
int i;
- if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
+ if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
!(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
@@ -706,13 +712,13 @@ nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
struct nfp_resource *res;
int ret;
- if (!nn->cpp)
+ if (!nn->app)
return -EOPNOTSUPP;
dump->version = 1;
dump->flag = NFP_DUMP_NSP_DIAG;
- res = nfp_resource_acquire(nn->cpp, NFP_RESOURCE_NSP_DIAG);
+ res = nfp_resource_acquire(nn->app->cpp, NFP_RESOURCE_NSP_DIAG);
if (IS_ERR(res))
return PTR_ERR(res);
@@ -722,7 +728,7 @@ nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer)
goto exit_release;
}
- ret = nfp_cpp_read(nn->cpp, nfp_resource_cpp_id(res),
+ ret = nfp_cpp_read(nn->app->cpp, nfp_resource_cpp_id(res),
nfp_resource_address(res),
buffer, dump->len);
if (ret != dump->len)
@@ -743,7 +749,7 @@ static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
struct nfp_net *nn = netdev_priv(netdev);
- if (!nn->cpp)
+ if (!nn->app)
return -EOPNOTSUPP;
if (val->flag != NFP_DUMP_NSP_DIAG)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 8cb87cbe1120..388759e047d8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -43,6 +43,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
@@ -54,10 +55,11 @@
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
-
+#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"
+#include "nfp_port.h"
#define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
@@ -142,14 +144,16 @@ err_area:
static void
nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
{
+ struct nfp_eth_table_port *eth_port;
struct nfp_net_dp *dp = &nn->dp;
u8 mac_addr[ETH_ALEN];
const char *mac_str;
char name[32];
- if (nn->eth_port) {
- ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
- ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
+ eth_port = __nfp_port_get_eth_port(nn->port);
+ if (eth_port) {
+ ether_addr_copy(dp->netdev->dev_addr, eth_port->mac_addr);
+ ether_addr_copy(dp->netdev->perm_addr, eth_port->mac_addr);
return;
}
@@ -190,21 +194,17 @@ nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
char name[256];
- u16 interface;
- int pcie_pf;
int err = 0;
u64 val;
- interface = nfp_cpp_interface(pf->cpp);
- pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
-
- snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);
+ snprintf(name, sizeof(name), "nfd_cfg_pf%u_num_ports",
+ nfp_cppcore_pcie_unit(pf->cpp));
val = nfp_rtsym_read_le(pf->cpp, name, &err);
- /* Default to one port */
+ /* Default to one port/vNIC */
if (err) {
if (err != -ENOENT)
- nfp_err(pf->cpp, "Unable to read adapter port count\n");
+ nfp_err(pf->cpp, "Unable to read adapter vNIC count\n");
val = 1;
}
@@ -220,7 +220,7 @@ nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
min_qc = readl(ctrl_bar + start_off);
max_qc = min_qc;
- for (i = 0; i < pf->num_ports; i++) {
+ for (i = 0; i < pf->max_data_vnics; i++) {
/* To make our lives simpler only accept configuration where
* queues are allocated to PFs in order (queues of PFn all have
* indexes lower than PFn+1).
@@ -241,13 +241,9 @@ static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
const struct nfp_rtsym *ctrl_sym;
u8 __iomem *ctrl_bar;
char pf_symbol[256];
- u16 interface;
- int pcie_pf;
- interface = nfp_cpp_interface(pf->cpp);
- pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
-
- snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);
+ snprintf(pf_symbol, sizeof(pf_symbol), "_pf%u_net_bar0",
+ nfp_cppcore_pcie_unit(pf->cpp));
ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
if (!ctrl_sym) {
@@ -256,17 +252,17 @@ static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
return NULL;
}
- if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
+ if (ctrl_sym->size < pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE) {
dev_err(&pf->pdev->dev,
- "PF BAR0 too small to contain %d ports\n",
- pf->num_ports);
+ "PF BAR0 too small to contain %d vNICs\n",
+ pf->max_data_vnics);
return NULL;
}
ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
ctrl_sym->domain, ctrl_sym->target,
ctrl_sym->addr, ctrl_sym->size,
- &pf->ctrl_area);
+ &pf->data_vnic_bar);
if (IS_ERR(ctrl_bar)) {
dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
PTR_ERR(ctrl_bar));
@@ -276,37 +272,43 @@ static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
return ctrl_bar;
}
-static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
+static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
- struct nfp_net *nn;
+ nfp_port_free(nn->port);
+ list_del(&nn->vnic_list);
+ pf->num_vnics--;
+ nfp_net_free(nn);
+}
- while (!list_empty(&pf->ports)) {
- nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
- list_del(&nn->port_list);
- pf->num_netdevs--;
+static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
+{
+ struct nfp_net *nn;
- nfp_net_netdev_free(nn);
+ while (!list_empty(&pf->vnics)) {
+ nn = list_first_entry(&pf->vnics, struct nfp_net, vnic_list);
+ nfp_net_pf_free_vnic(pf, nn);
}
}
static struct nfp_net *
-nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
- void __iomem *tx_bar, void __iomem *rx_bar,
- int stride, struct nfp_net_fw_version *fw_ver,
- struct nfp_eth_table_port *eth_port)
+nfp_net_pf_alloc_vnic(struct nfp_pf *pf, void __iomem *ctrl_bar,
+ void __iomem *tx_bar, void __iomem *rx_bar,
+ int stride, struct nfp_net_fw_version *fw_ver,
+ unsigned int eth_id)
{
+ struct nfp_eth_table_port *eth_port;
u32 n_tx_rings, n_rx_rings;
struct nfp_net *nn;
n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
- /* Allocate and initialise the netdev */
- nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
+ /* Allocate and initialise the vNIC */
+ nn = nfp_net_alloc(pf->pdev, n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
- nn->cpp = pf->cpp;
+ nn->app = pf->app;
nn->fw_ver = *fw_ver;
nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = tx_bar;
@@ -314,14 +316,27 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
- nn->eth_port = eth_port;
+
+ eth_port = nfp_net_find_port(pf->eth_tbl, eth_id);
+ if (eth_port) {
+ nn->port = nfp_port_alloc(pf->app, NFP_PORT_PHYS_PORT,
+ nn->dp.netdev);
+ if (IS_ERR(nn->port)) {
+ nfp_net_free(nn);
+ return ERR_CAST(nn->port);
+ }
+ nn->port->eth_id = eth_id;
+ nn->port->eth_port = eth_port;
+ }
+
+ pf->num_vnics++;
+ list_add_tail(&nn->vnic_list, &pf->vnics);
return nn;
}
static int
-nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
- unsigned int id)
+nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
int err;
@@ -334,32 +349,48 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->dp.netdev);
+ err = nfp_net_init(nn);
if (err)
return err;
- nfp_net_debugfs_port_add(nn, pf->ddir, id);
+ nfp_net_debugfs_vnic_add(nn, pf->ddir, id);
+
+ if (nn->port) {
+ err = nfp_devlink_port_register(pf->app, nn->port);
+ if (err)
+ goto err_dfs_clean;
+ }
nfp_net_info(nn);
return 0;
+
+err_dfs_clean:
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_clean(nn);
+ return err;
}
static int
-nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
- void __iomem *tx_bar, void __iomem *rx_bar,
- int stride, struct nfp_net_fw_version *fw_ver)
+nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
+ void __iomem *tx_bar, void __iomem *rx_bar,
+ int stride, struct nfp_net_fw_version *fw_ver)
{
u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
- struct nfp_eth_table_port *eth_port;
struct nfp_net *nn;
unsigned int i;
int err;
+ if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count) {
+ nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
+ pf->max_data_vnics, pf->eth_tbl->count);
+ return -EINVAL;
+ }
+
prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
- for (i = 0; i < pf->num_ports; i++) {
+ for (i = 0; i < pf->max_data_vnics; i++) {
tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
@@ -367,53 +398,65 @@ nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
prev_tx_base = tgt_tx_base;
prev_rx_base = tgt_rx_base;
- eth_port = nfp_net_find_port(pf->eth_tbl, i);
- if (eth_port && eth_port->override_changed) {
- nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
- } else {
- nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
- rx_bar, stride,
- fw_ver, eth_port);
- if (IS_ERR(nn)) {
- err = PTR_ERR(nn);
- goto err_free_prev;
- }
- list_add_tail(&nn->port_list, &pf->ports);
- pf->num_netdevs++;
+ nn = nfp_net_pf_alloc_vnic(pf, ctrl_bar, tx_bar, rx_bar,
+ stride, fw_ver, i);
+ if (IS_ERR(nn)) {
+ err = PTR_ERR(nn);
+ goto err_free_prev;
}
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
+
+ /* Check if vNIC has external port associated and cfg is OK */
+ if (pf->eth_tbl && !nn->port) {
+ nfp_err(pf->cpp, "NSP port entries don't match vNICs (no entry for port #%d)\n", i);
+ err = -EINVAL;
+ goto err_free_prev;
+ }
+ if (nn->port && nn->port->eth_port->override_changed) {
+ nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
+ nfp_net_pf_free_vnic(pf, nn);
+ continue;
+ }
}
- if (list_empty(&pf->ports))
+ if (list_empty(&pf->vnics))
return -ENODEV;
return 0;
err_free_prev:
- nfp_net_pf_free_netdevs(pf);
+ nfp_net_pf_free_vnics(pf);
return err;
}
+static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
+{
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
+ nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+ nfp_net_clean(nn);
+}
+
static int
-nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
- void __iomem *ctrl_bar, void __iomem *tx_bar,
- void __iomem *rx_bar, int stride,
- struct nfp_net_fw_version *fw_ver)
+nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
+ void __iomem *ctrl_bar, void __iomem *tx_bar,
+ void __iomem *rx_bar, int stride,
+ struct nfp_net_fw_version *fw_ver)
{
- unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
+ unsigned int id, wanted_irqs, num_irqs, vnics_left, irqs_left;
struct nfp_net *nn;
int err;
- /* Allocate the netdevs and do basic init */
- err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
- stride, fw_ver);
+ /* Allocate the vNICs and do basic init */
+ err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, tx_bar, rx_bar,
+ stride, fw_ver);
if (err)
return err;
/* Get MSI-X vectors */
wanted_irqs = 0;
- list_for_each_entry(nn, &pf->ports, port_list)
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL);
@@ -423,7 +466,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
}
num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
- NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
+ NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
wanted_irqs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
@@ -431,23 +474,23 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
goto err_vec_free;
}
- /* Distribute IRQs to ports */
+ /* Distribute IRQs to vNICs */
irqs_left = num_irqs;
- ports_left = pf->num_netdevs;
- list_for_each_entry(nn, &pf->ports, port_list) {
+ vnics_left = pf->num_vnics;
+ list_for_each_entry(nn, &pf->vnics, vnic_list) {
unsigned int n;
- n = DIV_ROUND_UP(irqs_left, ports_left);
+ n = DIV_ROUND_UP(irqs_left, vnics_left);
nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
n);
irqs_left -= n;
- ports_left--;
+ vnics_left--;
}
- /* Finish netdev init and register */
+ /* Finish vNIC init and register */
id = 0;
- list_for_each_entry(nn, &pf->ports, port_list) {
- err = nfp_net_pf_init_port_netdev(pf, nn, id);
+ list_for_each_entry(nn, &pf->vnics, vnic_list) {
+ err = nfp_net_pf_init_vnic(pf, nn, id);
if (err)
goto err_prev_deinit;
@@ -457,18 +500,29 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
return 0;
err_prev_deinit:
- list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->dp.netdev);
- }
+ list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
+ nfp_net_pf_clean_vnic(pf, nn);
nfp_net_irqs_disable(pf->pdev);
err_vec_free:
kfree(pf->irq_entries);
err_nn_free:
- nfp_net_pf_free_netdevs(pf);
+ nfp_net_pf_free_vnics(pf);
return err;
}
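
[Editor's note] The IRQ distribution loop above recomputes DIV_ROUND_UP(irqs_left, vnics_left) for each vNIC, splitting the vectors as evenly as possible. A standalone worked example (10 vectors over 3 vNICs yields 4, 3, 3):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int irqs_left = 10, vnics_left = 3;

	while (vnics_left) {
		unsigned int n = DIV_ROUND_UP(irqs_left, vnics_left);

		printf("vNIC gets %u vectors\n", n);	/* 4, 3, 3 */
		irqs_left -= n;
		vnics_left--;
	}
	return 0;
}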
+static int nfp_net_pf_app_init(struct nfp_pf *pf)
+{
+ pf->app = nfp_app_alloc(pf);
+
+ return PTR_ERR_OR_ZERO(pf->app);
+}
+
+static void nfp_net_pf_app_clean(struct nfp_pf *pf)
+{
+ nfp_app_free(pf->app);
+ pf->app = NULL;
+}
+
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
nfp_net_debugfs_dir_clean(&pf->ddir);
@@ -476,99 +530,126 @@ static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
nfp_net_irqs_disable(pf->pdev);
kfree(pf->irq_entries);
+ nfp_net_pf_app_clean(pf);
+
nfp_cpp_area_release_free(pf->rx_area);
nfp_cpp_area_release_free(pf->tx_area);
- nfp_cpp_area_release_free(pf->ctrl_area);
+ nfp_cpp_area_release_free(pf->data_vnic_bar);
}
-static void nfp_net_refresh_netdevs(struct work_struct *work)
+static int
+nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
+ struct nfp_eth_table *eth_table)
+{
+ struct nfp_eth_table_port *eth_port;
+
+ ASSERT_RTNL();
+
+ eth_port = nfp_net_find_port(eth_table, port->eth_id);
+ if (!eth_port) {
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
+ port->eth_id);
+ return -EIO;
+ }
+ if (eth_port->override_changed) {
+ nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id);
+ port->type = NFP_PORT_INVALID;
+ }
+
+ memcpy(port->eth_port, eth_port, sizeof(*eth_port));
+
+ return 0;
+}
+
+int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
- struct nfp_pf *pf = container_of(work, struct nfp_pf,
- port_refresh_work);
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
+ struct nfp_port *port;
- mutex_lock(&pf->port_lock);
+ lockdep_assert_held(&pf->lock);
/* Check for nfp_net_pci_remove() racing against us */
- if (list_empty(&pf->ports))
- goto out;
+ if (list_empty(&pf->vnics))
+ return 0;
- list_for_each_entry(nn, &pf->ports, port_list)
- nfp_net_link_changed_read_clear(nn);
+ /* Update state of all ports */
+ rtnl_lock();
+ list_for_each_entry(port, &pf->ports, port_list)
+ clear_bit(NFP_PORT_CHANGED, &port->flags);
eth_table = nfp_eth_read_ports(pf->cpp);
if (!eth_table) {
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (__nfp_port_get_eth_port(port))
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ rtnl_unlock();
nfp_err(pf->cpp, "Error refreshing port config!\n");
- goto out;
+ return -EIO;
}
- rtnl_lock();
- list_for_each_entry(nn, &pf->ports, port_list) {
- if (!nn->eth_port)
- continue;
- nn->eth_port = nfp_net_find_port(eth_table,
- nn->eth_port->eth_index);
- }
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (__nfp_port_get_eth_port(port))
+ nfp_net_eth_port_update(pf->cpp, port, eth_table);
rtnl_unlock();
- kfree(pf->eth_tbl);
- pf->eth_tbl = eth_table;
+ kfree(eth_table);
- list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
- if (!nn->eth_port) {
- nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
+ /* Shoot off the ports which became invalid */
+ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
+ if (!nn->port || nn->port->type != NFP_PORT_INVALID)
continue;
- }
- if (!nn->eth_port->override_changed)
- continue;
-
- nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
- nfp_net_netdev_clean(nn->dp.netdev);
-
- list_del(&nn->port_list);
- pf->num_netdevs--;
- nfp_net_netdev_free(nn);
+ nfp_net_pf_clean_vnic(pf, nn);
+ nfp_net_pf_free_vnic(pf, nn);
}
- if (list_empty(&pf->ports))
+ if (list_empty(&pf->vnics))
nfp_net_pci_remove_finish(pf);
-out:
- mutex_unlock(&pf->port_lock);
+
+ return 0;
}
-void nfp_net_refresh_port_table(struct nfp_net *nn)
+static void nfp_net_refresh_vnics(struct work_struct *work)
{
- struct nfp_pf *pf = pci_get_drvdata(nn->pdev);
+ struct nfp_pf *pf = container_of(work, struct nfp_pf,
+ port_refresh_work);
+
+ mutex_lock(&pf->lock);
+ nfp_net_refresh_port_table_sync(pf);
+ mutex_unlock(&pf->lock);
+}
+
+void nfp_net_refresh_port_table(struct nfp_port *port)
+{
+ struct nfp_pf *pf = port->app->pf;
+
+ set_bit(NFP_PORT_CHANGED, &port->flags);
schedule_work(&pf->port_refresh_work);
}
-int nfp_net_refresh_eth_port(struct nfp_net *nn)
+int nfp_net_refresh_eth_port(struct nfp_port *port)
{
- struct nfp_eth_table_port *eth_port;
+ struct nfp_cpp *cpp = port->app->cpp;
struct nfp_eth_table *eth_table;
+ int ret;
- eth_table = nfp_eth_read_ports(nn->cpp);
- if (!eth_table) {
- nn_err(nn, "Error refreshing port state table!\n");
- return -EIO;
- }
+ clear_bit(NFP_PORT_CHANGED, &port->flags);
- eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index);
- if (!eth_port) {
- nn_err(nn, "Error finding state of the port!\n");
- kfree(eth_table);
+ eth_table = nfp_eth_read_ports(cpp);
+ if (!eth_table) {
+ set_bit(NFP_PORT_CHANGED, &port->flags);
+ nfp_err(cpp, "Error refreshing port state table!\n");
return -EIO;
}
- memcpy(nn->eth_port, eth_port, sizeof(*eth_port));
+ ret = nfp_net_eth_port_update(cpp, port, eth_table);
kfree(eth_table);
- return 0;
+ return ret;
}
/*
@@ -584,8 +665,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
int stride;
int err;
- INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
- mutex_init(&pf->port_lock);
+ INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);
/* Verify that the board has completed initialization */
if (!nfp_is_ready(pf->cpp)) {
@@ -593,8 +673,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
return -EINVAL;
}
- mutex_lock(&pf->port_lock);
- pf->num_ports = nfp_net_pf_get_num_ports(pf);
+ mutex_lock(&pf->lock);
+ pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
if (!ctrl_bar) {
@@ -667,26 +747,32 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_unmap_tx;
}
+ err = nfp_net_pf_app_init(pf);
+ if (err)
+ goto err_unmap_rx;
+
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
- err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
- stride, &fw_ver);
+ err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, tx_bar, rx_bar,
+ stride, &fw_ver);
if (err)
goto err_clean_ddir;
- mutex_unlock(&pf->port_lock);
+ mutex_unlock(&pf->lock);
return 0;
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
+ nfp_net_pf_app_clean(pf);
+err_unmap_rx:
nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
- nfp_cpp_area_release_free(pf->ctrl_area);
+ nfp_cpp_area_release_free(pf->data_vnic_bar);
err_unlock:
- mutex_unlock(&pf->port_lock);
+ mutex_unlock(&pf->lock);
return err;
}
@@ -694,21 +780,18 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct nfp_net *nn;
- mutex_lock(&pf->port_lock);
- if (list_empty(&pf->ports))
+ mutex_lock(&pf->lock);
+ if (list_empty(&pf->vnics))
goto out;
- list_for_each_entry(nn, &pf->ports, port_list) {
- nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
-
- nfp_net_netdev_clean(nn->dp.netdev);
- }
+ list_for_each_entry(nn, &pf->vnics, vnic_list)
+ nfp_net_pf_clean_vnic(pf, nn);
- nfp_net_pf_free_netdevs(pf);
+ nfp_net_pf_free_vnics(pf);
nfp_net_pci_remove_finish(pf);
out:
- mutex_unlock(&pf->port_lock);
+ mutex_unlock(&pf->lock);
cancel_work_sync(&pf->port_refresh_work);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 86e61be6f35c..3f1c7f0f392e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -202,7 +202,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_netdev_alloc(pdev, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
@@ -267,7 +267,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
nfp_netvf_get_mac_addr(nn);
num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
- NFP_NET_MIN_PORT_IRQS,
+ NFP_NET_MIN_VNIC_IRQS,
NFP_NET_NON_Q_VECTORS +
nn->dp.num_r_vecs);
if (!num_irqs) {
@@ -283,13 +283,13 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
*/
nn->me_freq_mhz = 1200;
- err = nfp_net_netdev_init(nn->dp.netdev);
+ err = nfp_net_init(nn);
if (err)
goto err_irqs_disable;
nfp_net_info(nn);
vf->ddir = nfp_net_debugfs_device_add(pdev);
- nfp_net_debugfs_port_add(nn, vf->ddir, 0);
+ nfp_net_debugfs_vnic_add(nn, vf->ddir, 0);
return 0;
@@ -304,7 +304,7 @@ err_unmap_tx:
else
iounmap(vf->q_bar);
err_netdev_free:
- nfp_net_netdev_free(nn);
+ nfp_net_free(nn);
err_ctrl_unmap:
iounmap(ctrl_bar);
err_pci_regions:
@@ -328,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_debugfs_dir_clean(&vf->ddir);
- nfp_net_netdev_clean(nn->dp.netdev);
+ nfp_net_clean(nn);
nfp_net_irqs_disable(pdev);
@@ -340,7 +340,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
}
iounmap(nn->dp.ctrl_bar);
- nfp_net_netdev_free(nn);
+ nfp_net_free(nn);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
new file mode 100644
index 000000000000..a17410ac01ab
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/lockdep.h>
+
+#include "nfpcore/nfp_nsp.h"
+#include "nfp_app.h"
+#include "nfp_main.h"
+#include "nfp_net.h"
+#include "nfp_port.h"
+
+struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
+{
+ struct nfp_net *nn;
+
+ if (WARN_ON(!nfp_netdev_is_nfp_net(netdev)))
+ return NULL;
+ nn = netdev_priv(netdev);
+
+ return nn->port;
+}
+
+struct nfp_port *
+nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
+{
+ struct nfp_port *port;
+
+ lockdep_assert_held(&pf->lock);
+
+ if (type != NFP_PORT_PHYS_PORT)
+ return NULL;
+
+ list_for_each_entry(port, &pf->ports, port_list)
+ if (port->eth_id == id)
+ return port;
+
+ return NULL;
+}
+
+struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port)
+{
+ if (!port)
+ return NULL;
+ if (port->type != NFP_PORT_PHYS_PORT)
+ return NULL;
+
+ return port->eth_port;
+}
+
+struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port)
+{
+ if (!__nfp_port_get_eth_port(port))
+ return NULL;
+
+ if (test_bit(NFP_PORT_CHANGED, &port->flags))
+ if (nfp_net_refresh_eth_port(port))
+ return NULL;
+
+ return __nfp_port_get_eth_port(port);
+}
+
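+
[Editor's note] The pair above follows the usual kernel double-underscore convention: __nfp_port_get_eth_port() only validates the port type and returns the cached table entry, while nfp_port_get_eth_port() additionally refreshes the entry when NFP_PORT_CHANGED is set. Typical use mirrors the ethtool hunks earlier in this patch:

struct nfp_eth_table_port *eth_port;
struct nfp_port *port;

port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);	/* may trigger an NSP refresh */
if (!eth_port)
	return -EOPNOTSUPP;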
+int
+nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int n;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!eth_port->is_split)
+ n = snprintf(name, len, "p%d", eth_port->label_port);
+ else
+ n = snprintf(name, len, "p%ds%d", eth_port->label_port,
+ eth_port->label_subport);
+ if (n >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
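+
[Editor's note] The generated names follow the switchdev convention: "p<port>" for a whole physical port and "p<port>s<subport>" for a member of a split port, e.g. "p0" or "p0s2"; the n >= len check catches snprintf() truncation. An illustrative caller (assumes a netdev bound to an NFP port):

char name[IFNAMSIZ];

if (!nfp_port_get_phys_port_name(netdev, name, sizeof(name)))
	netdev_info(netdev, "phys_port_name: %s\n", name);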
+struct nfp_port *
+nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
+ struct net_device *netdev)
+{
+ struct nfp_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->netdev = netdev;
+ port->type = type;
+ port->app = app;
+
+ list_add_tail(&port->port_list, &app->pf->ports);
+
+ return port;
+}
+
+void nfp_port_free(struct nfp_port *port)
+{
+ if (!port)
+ return;
+ list_del(&port->port_list);
+ kfree(port);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
new file mode 100644
index 000000000000..4d1a9b3fed41
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _NFP_PORT_H_
+#define _NFP_PORT_H_
+
+#include <net/devlink.h>
+
+struct net_device;
+struct nfp_app;
+struct nfp_pf;
+struct nfp_port;
+
+/**
+ * enum nfp_port_type - type of port NFP can switch traffic to
+ * @NFP_PORT_INVALID: port is invalid; %NFP_PORT_PHYS_PORT transitions to this
+ * state when the port disappears because of a FW fault or
+ * config change
+ * @NFP_PORT_PHYS_PORT: external NIC port
+ */
+enum nfp_port_type {
+ NFP_PORT_INVALID,
+ NFP_PORT_PHYS_PORT,
+};
+
+/**
+ * enum nfp_port_flags - port flags (can be type-specific)
+ * @NFP_PORT_CHANGED: port state has changed since last eth table refresh;
+ * for NFP_PORT_PHYS_PORT, never set otherwise; must hold
+ * rtnl_lock to clear
+ */
+enum nfp_port_flags {
+ NFP_PORT_CHANGED = 0,
+};
+
+/**
+ * struct nfp_port - structure representing NFP port
+ * @netdev: backpointer to associated netdev
+ * @type: what port type does the entity represent
+ * @flags: port flags
+ * @app: backpointer to the app structure
+ * @dl_port: devlink port structure
+ * @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
+ * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
+ * @port_list: entry on pf's list of ports
+ */
+struct nfp_port {
+ struct net_device *netdev;
+ enum nfp_port_type type;
+
+ unsigned long flags;
+
+ struct nfp_app *app;
+
+ struct devlink_port dl_port;
+
+ unsigned int eth_id;
+ struct nfp_eth_table_port *eth_port;
+
+ struct list_head port_list;
+};
+
+struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
+struct nfp_port *
+nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
+struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
+struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port);
+
+int
+nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len);
+
+struct nfp_port *
+nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
+ struct net_device *netdev);
+void nfp_port_free(struct nfp_port *port);
+
+int nfp_net_refresh_eth_port(struct nfp_port *port);
+void nfp_net_refresh_port_table(struct nfp_port *port);
+int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
+
+int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
+void nfp_devlink_port_unregister(struct nfp_port *port);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index edecc0a27485..154b0b594184 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -289,6 +289,17 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
+/**
+ * nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle
+ * @cpp: CPP handle
+ *
+ * Return: PCI unit for the NFP CPP handle
+ */
+static inline u8 nfp_cppcore_pcie_unit(struct nfp_cpp *cpp)
+{
+ return NFP_CPP_INTERFACE_UNIT_of(nfp_cpp_interface(cpp));
+}
+
struct nfp_cpp_explicit;
struct nfp_cpp_explicit_command {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 36b21e4dc56d..84a1d20adae1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -96,6 +96,7 @@ enum nfp_eth_aneg {
* @override_changed: is media reconfig pending?
*
* @port_type: one of %PORT_* defines for ethtool
+ * @port_lanes: total number of lanes on the port (sum of lanes of all subports)
* @is_split: is interface part of a split port
*/
struct nfp_eth_table {
@@ -127,6 +128,8 @@ struct nfp_eth_table {
/* Computed fields */
u8 port_type;
+ unsigned int port_lanes;
+
bool is_split;
} ports[0];
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 639438d8313a..b0f8785c064f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -186,17 +186,19 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
}
static void
-nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
+nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table)
{
unsigned int i, j;
for (i = 0; i < table->count; i++)
for (j = 0; j < table->count; j++) {
- if (i == j)
- continue;
if (table->ports[i].label_port !=
table->ports[j].label_port)
continue;
+ table->ports[i].port_lanes += table->ports[j].lanes;
+
+ if (i == j)
+ continue;
if (table->ports[i].label_subport ==
table->ports[j].label_subport)
nfp_warn(cpp,
@@ -205,7 +207,6 @@ nfp_eth_mark_split_ports(struct nfp_cpp *cpp, struct nfp_eth_table *table)
table->ports[i].label_subport);
table->ports[i].is_split = true;
- break;
}
}
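
[Editor's note] Two behavioural changes in the hunk above: the lanes accumulation now runs before the i == j check, so each entry counts its own lanes and port_lanes becomes the total for the physical port (the sum over every subport sharing label_port); and removing the break means is_split is set on all duplicates, not only up to the first match. A minimal model of the pass (hypothetical flattened types):

for (i = 0; i < count; i++)
	for (j = 0; j < count; j++) {
		if (ports[i].label_port != ports[j].label_port)
			continue;
		ports[i].port_lanes += ports[j].lanes;	/* includes i == j */
		if (i != j)
			ports[i].is_split = true;
	}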
@@ -289,7 +290,7 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
nfp_eth_port_translate(nsp, &entries[i], i,
&table->ports[j++]);
- nfp_eth_mark_split_ports(cpp, table);
+ nfp_eth_calc_port_geometry(cpp, table);
for (i = 0; i < table->count; i++)
nfp_eth_calc_port_type(cpp, &table->ports[i]);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 2ab1aab7c3fe..e0becec17b09 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -54,7 +54,7 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_MAJOR_VERSION 8
#define QED_MINOR_VERSION 10
-#define QED_REVISION_VERSION 10
+#define QED_REVISION_VERSION 11
#define QED_ENGINEERING_VERSION 21
#define QED_VERSION \
@@ -92,7 +92,7 @@ enum qed_mcp_protocol_type;
#define QED_MFW_SET_FIELD(name, field, value) \
do { \
- (name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
+ (name) &= ~(field ## _MASK); \
(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
} while (0)
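
[Editor's note] The old macro shifted the mask a second time even though the field masks here are defined pre-shifted (the following line ANDs the shifted value with field##_MASK directly), so it cleared bits one field-width too high. A standalone demonstration with a hypothetical field:

#include <stdio.h>

#define FOO_SHIFT	4
#define FOO_MASK	0x00f0	/* already in shifted position */

int main(void)
{
	unsigned int name = 0xffff;

	/* old: name &= ~(FOO_MASK << FOO_SHIFT); clears bits 8..11 (wrong) */
	name &= ~FOO_MASK;			/* fixed: clears bits 4..7 */
	name |= (2u << FOO_SHIFT) & FOO_MASK;
	printf("0x%04x\n", name);		/* prints 0xff2f */
	return 0;
}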
@@ -598,16 +598,11 @@ struct qed_dev {
enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
-#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
- CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
-#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
- QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
-
u16 vendor_id;
u16 device_id;
#define QED_DEV_ID_MASK 0xff00
@@ -621,7 +616,6 @@ struct qed_dev {
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
-#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
u16 chip_metal;
@@ -633,7 +627,7 @@ struct qed_dev {
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
- u8 num_ports_in_engines;
+ u8 num_ports_in_engine;
u8 num_funcs_in_port;
u8 path_id;
@@ -644,7 +638,6 @@ struct qed_dev {
int pcie_width;
int pcie_speed;
- u8 ver_str[VER_SIZE];
/* Add MF related configuration */
u8 mcp_rev;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index d883ad5bec6d..b83fe1d9e988 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -923,6 +923,7 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_dcbx_info);
+ p_hwfn->p_dcbx_info = NULL;
}
static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
@@ -944,17 +945,18 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
p_dest->pf_id = p_src->pf_id;
update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
- p_dest->update_fcoe_dcb_data_flag = update_flag;
+ p_dest->update_fcoe_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
- p_dest->update_roce_dcb_data_flag = update_flag;
+ p_dest->update_roce_dcb_data_mode = update_flag;
+
update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
- p_dest->update_roce_dcb_data_flag = update_flag;
+ p_dest->update_rroce_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
- p_dest->update_iscsi_dcb_data_flag = update_flag;
+ p_dest->update_iscsi_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
- p_dest->update_eth_dcb_data_flag = update_flag;
+ p_dest->update_eth_dcb_data_mode = update_flag;
p_dcb_data = &p_dest->fcoe_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 483241b4b05d..87a1389fb4a8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -15,13 +15,6 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
-/* Chip IDs enum */
-enum chip_ids {
- CHIP_BB_B0,
- CHIP_K2,
- MAX_CHIP_IDS
-};
-
/* Memory groups enum */
enum mem_groups {
MEM_GROUP_PXP_MEM,
@@ -33,7 +26,6 @@ enum mem_groups {
MEM_GROUP_BRB_MEM,
MEM_GROUP_PRS_MEM,
MEM_GROUP_SDM_MEM,
- MEM_GROUP_PBUF,
MEM_GROUP_IOR,
MEM_GROUP_RAM,
MEM_GROUP_BTB_RAM,
@@ -45,6 +37,7 @@ enum mem_groups {
MEM_GROUP_CAU_PI,
MEM_GROUP_CAU_MEM,
MEM_GROUP_PXP_ILT,
+ MEM_GROUP_PBUF,
MEM_GROUP_MULD_MEM,
MEM_GROUP_BTB_MEM,
MEM_GROUP_IGU_MEM,
@@ -66,7 +59,6 @@ static const char * const s_mem_group_names[] = {
"BRB_MEM",
"PRS_MEM",
"SDM_MEM",
- "PBUF",
"IOR",
"RAM",
"BTB_RAM",
@@ -78,6 +70,7 @@ static const char * const s_mem_group_names[] = {
"CAU_PI",
"CAU_MEM",
"PXP_ILT",
+ "PBUF",
"MULD_MEM",
"BTB_MEM",
"IGU_MEM",
@@ -88,48 +81,59 @@ static const char * const s_mem_group_names[] = {
};
/* Idle check conditions */
-static u32 cond4(const u32 *r, const u32 *imm)
+
+static u32 cond5(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
}
-static u32 cond6(const u32 *r, const u32 *imm)
+static u32 cond7(const u32 *r, const u32 *imm)
{
return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}
-static u32 cond5(const u32 *r, const u32 *imm)
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+ return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
{
return (r[0] & imm[0]) != imm[1];
}
-static u32 cond8(const u32 *r, const u32 *imm)
+static u32 cond9(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) >> imm[1]) !=
(((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}
-static u32 cond9(const u32 *r, const u32 *imm)
+static u32 cond10(const u32 *r, const u32 *imm)
{
return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}
-static u32 cond1(const u32 *r, const u32 *imm)
+static u32 cond4(const u32 *r, const u32 *imm)
{
return (r[0] & ~imm[0]) != imm[1];
}
static u32 cond0(const u32 *r, const u32 *imm)
{
+ return (r[0] & ~r[1]) != imm[0];
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
return r[0] != imm[0];
}
-static u32 cond10(const u32 *r, const u32 *imm)
+static u32 cond11(const u32 *r, const u32 *imm)
{
return r[0] != r[1] && r[2] == imm[0];
}
-static u32 cond11(const u32 *r, const u32 *imm)
+static u32 cond12(const u32 *r, const u32 *imm)
{
return r[0] != r[1] && r[2] > imm[0];
}
@@ -139,12 +143,12 @@ static u32 cond3(const u32 *r, const u32 *imm)
return r[0] != r[1];
}
-static u32 cond12(const u32 *r, const u32 *imm)
+static u32 cond13(const u32 *r, const u32 *imm)
{
return r[0] & imm[0];
}
-static u32 cond7(const u32 *r, const u32 *imm)
+static u32 cond8(const u32 *r, const u32 *imm)
{
return r[0] < (r[1] - imm[0]);
}
@@ -169,6 +173,8 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond10,
cond11,
cond12,
+ cond13,
+ cond14,
};
/******************************* Data Types **********************************/
@@ -181,11 +187,6 @@ enum platform_ids {
MAX_PLATFORM_IDS
};
-struct dbg_array {
- const u32 *ptr;
- u32 size_in_dwords;
-};
-
struct chip_platform_defs {
u8 num_ports;
u8 num_pfs;
@@ -204,7 +205,9 @@ struct platform_defs {
u32 delay_factor;
};
-/* Storm constant definitions */
+/* Storm constant definitions.
+ * Addresses are in bytes, sizes are in quad-regs.
+ */
struct storm_defs {
char letter;
enum block_id block_id;
@@ -218,13 +221,13 @@ struct storm_defs {
u32 sem_sync_dbg_empty_addr;
u32 sem_slow_dbg_empty_addr;
u32 cm_ctx_wr_addr;
- u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_ag_ctx_lid_size;
u32 cm_conn_ag_ctx_rd_addr;
- u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_st_ctx_lid_size;
u32 cm_conn_st_ctx_rd_addr;
- u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_ag_ctx_lid_size;
u32 cm_task_ag_ctx_rd_addr;
- u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_st_ctx_lid_size;
u32 cm_task_st_ctx_rd_addr;
};
@@ -233,17 +236,23 @@ struct block_defs {
const char *name;
bool has_dbg_bus[MAX_CHIP_IDS];
bool associated_to_storm;
- u32 storm_id; /* Valid only if associated_to_storm is true */
+
+ /* Valid only if associated_to_storm is true */
+ u32 storm_id;
enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
u32 dbg_select_addr;
- u32 dbg_cycle_enable_addr;
+ u32 dbg_enable_addr;
u32 dbg_shift_addr;
u32 dbg_force_valid_addr;
u32 dbg_force_frame_addr;
bool has_reset_bit;
- bool unreset; /* If true, the block is taken out of reset before dump */
+
+ /* If true, block is taken out of reset before dump */
+ bool unreset;
enum dbg_reset_regs reset_reg;
- u8 reset_bit_offset; /* Bit offset in reset register */
+
+ /* Bit offset in reset register */
+ u8 reset_bit_offset;
};
/* Reset register definitions */
@@ -262,12 +271,13 @@ struct grc_param_defs {
u32 crash_preset_val;
};
+/* Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
const char *mem_name;
const char *type_name;
- u32 addr; /* In 128b units */
+ u32 addr;
u32 num_entries[MAX_CHIP_IDS];
- u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+ u32 entry_width[MAX_CHIP_IDS];
};
struct vfc_ram_defs {
@@ -289,10 +299,20 @@ struct big_ram_defs {
struct phy_defs {
const char *phy_name;
+
+ /* PHY base GRC address */
u32 base_addr;
+
+ /* Relative address of indirect TBUS address register (bits 0..7) */
u32 tbus_addr_lo_addr;
+
+ /* Relative address of indirect TBUS address register (bits 8..10) */
u32 tbus_addr_hi_addr;
+
+ /* Relative address of indirect TBUS data register (bits 0..7) */
u32 tbus_data_lo_addr;
+
+ /* Relative address of indirect TBUS data register (bits 8..11) */
u32 tbus_data_hi_addr;
};
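
Note: the four tbus_* fields describe an indirect interface: an 11-bit TBUS address is programmed through the lo/hi address registers, and a 12-bit result is read back through the lo/hi data registers. A sketch of one read, assuming only the qed_wr()/qed_rd() accessors used elsewhere in this file and the bit layout stated in the comments above (the driver's actual PHY dump loop may differ):

static u32 ex_phy_tbus_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    const struct phy_defs *phy, u16 tbus_addr)
{
	u32 lo, hi;

	/* Program the 11-bit TBUS address: low byte, then high 3 bits */
	qed_wr(p_hwfn, p_ptt, phy->base_addr + phy->tbus_addr_lo_addr,
	       tbus_addr & 0xff);
	qed_wr(p_hwfn, p_ptt, phy->base_addr + phy->tbus_addr_hi_addr,
	       (tbus_addr >> 8) & 0x7);

	/* Read back the 12-bit data value: low byte, then high 4 bits */
	lo = qed_rd(p_hwfn, p_ptt, phy->base_addr + phy->tbus_data_lo_addr);
	hi = qed_rd(p_hwfn, p_ptt, phy->base_addr + phy->tbus_data_hi_addr);

	return ((hi & 0xf) << 8) | (lo & 0xff);
}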
@@ -300,9 +320,11 @@ struct phy_defs {
#define MAX_LCIDS 320
#define MAX_LTIDS 320
+
#define NUM_IOR_SETS 2
#define IORS_PER_SET 176
#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
+
#define BYTES_IN_DWORD sizeof(u32)
/* In the macros below, size and offset are specified in bits */
@@ -315,6 +337,7 @@ struct phy_defs {
#define FIELD_BIT_MASK(type, field) \
(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
FIELD_DWORD_SHIFT(type, field))
+
#define SET_VAR_FIELD(var, type, field, val) \
do { \
var[FIELD_DWORD_OFFSET(type, field)] &= \
@@ -322,31 +345,51 @@ struct phy_defs {
var[FIELD_DWORD_OFFSET(type, field)] |= \
(val) << FIELD_DWORD_SHIFT(type, field); \
} while (0)
+
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
do { \
for (i = 0; i < (arr_size); i++) \
qed_wr(dev, ptt, addr, (arr)[i]); \
} while (0)
+
#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
do { \
for (i = 0; i < (arr_size); i++) \
(arr)[i] = qed_rd(dev, ptt, addr); \
} while (0)
+#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
+#endif
+#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+#endif
+
+/* Extra lines include a signature line + an optional latency events line */
+#ifndef NUM_DBG_LINES
+#define NUM_EXTRA_DBG_LINES(block_desc) \
+ (1 + ((block_desc)->has_latency_events ? 1 : 0))
+#define NUM_DBG_LINES(block_desc) \
+ ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
+#endif
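
Note (worked example): for a block descriptor with num_of_lines == 10, NUM_DBG_LINES() yields 10 + (1 + 1) = 12 when the block has latency events, and 10 + 1 = 11 otherwise; the constant +1 is the signature line.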
+
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+
#define REG_DUMP_LEN_SHIFT 24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+
#define IDLE_CHK_RULE_SIZE_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+
#define IDLE_CHK_RESULT_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+
#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+
#define IDLE_CHK_MAX_ENTRIES_SIZE 32
/* The sizes and offsets below are specified in bits */
@@ -363,62 +406,92 @@ struct phy_defs {
#define VFC_RAM_ADDR_ROW_OFFSET 2
#define VFC_RAM_ADDR_ROW_SIZE 10
#define VFC_RAM_RESP_STRUCT_SIZE 256
+
#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+
#define NUM_VFC_RAM_TYPES 4
+
#define VFC_CAM_NUM_ROWS 512
+
#define VFC_OPCODE_CAM_RD 14
#define VFC_OPCODE_RAM_RD 0
+
#define NUM_RSS_MEM_TYPES 5
+
#define NUM_BIG_RAM_TYPES 3
#define BIG_RAM_BLOCK_SIZE_BYTES 128
#define BIG_RAM_BLOCK_SIZE_DWORDS \
BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+
#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
+
#define RESET_REG_UNRESET_OFFSET 4
+
#define STALL_DELAY_MS 500
+
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_DBG_BUS_LINES 256
+
#define NUM_COMMON_GLOBAL_PARAMS 8
+
#define FW_IMG_MAIN 1
-#define REG_FIFO_DEPTH_ELEMENTS 32
+
+#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
+#endif
+#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
-#define IGU_FIFO_DEPTH_ELEMENTS 64
+
+#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
+#endif
+#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
-#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
+
+#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
+#endif
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+
#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
(MCP_REG_SCRATCH + \
offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/
+struct dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- { "bb_b0",
- { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
- {0, 0, 0}, {0, 0, 0} } },
- { "k2",
- { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
- {0, 0, 0}, {0, 0, 0} } }
+ { "bb",
+ {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
+ {0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0} } },
+ { "ah",
+ {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
+ {0, 0, 0},
+ {0, 0, 0},
+ {0, 0, 0} } }
};
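
Note: this array is indexed by dev_data->chip_id (set in qed_dbg_dev_init() further down, where CHIP_BB_B0 becomes CHIP_BB), which is what the "bb"/"ah" renames line up with. A sketch of the two-level lookup the register-dump code below performs:

/* Sketch: per-chip, per-platform limits in one expression */
static const struct chip_platform_defs *
ex_platform_defs(const struct dbg_tools_data *dev_data)
{
	return &s_chip_defs[dev_data->chip_id]
			.per_platform[dev_data->platform_id];
}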
/* Storm constant definitions array */
@@ -427,69 +500,74 @@ static struct storm_defs s_storm_defs[] = {
{'T', BLOCK_TSEM,
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
- TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
- TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
TCM_REG_CTX_RBC_ACCS,
4, TCM_REG_AGG_CON_CTX,
16, TCM_REG_SM_CON_CTX,
2, TCM_REG_AGG_TASK_CTX,
4, TCM_REG_SM_TASK_CTX},
+
/* Mstorm */
{'M', BLOCK_MSEM,
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
- MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
- MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+ MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
MCM_REG_CTX_RBC_ACCS,
1, MCM_REG_AGG_CON_CTX,
10, MCM_REG_SM_CON_CTX,
2, MCM_REG_AGG_TASK_CTX,
7, MCM_REG_SM_TASK_CTX},
+
/* Ustorm */
{'U', BLOCK_USEM,
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
- USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
- USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+ USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
UCM_REG_CTX_RBC_ACCS,
2, UCM_REG_AGG_CON_CTX,
13, UCM_REG_SM_CON_CTX,
3, UCM_REG_AGG_TASK_CTX,
3, UCM_REG_SM_TASK_CTX},
+
/* Xstorm */
{'X', BLOCK_XSEM,
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
- XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
- XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+ XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
XCM_REG_CTX_RBC_ACCS,
9, XCM_REG_AGG_CON_CTX,
15, XCM_REG_SM_CON_CTX,
0, 0,
0, 0},
+
/* Ystorm */
{'Y', BLOCK_YSEM,
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
- YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
- YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
YCM_REG_CTX_RBC_ACCS,
2, YCM_REG_AGG_CON_CTX,
3, YCM_REG_SM_CON_CTX,
2, YCM_REG_AGG_TASK_CTX,
12, YCM_REG_SM_TASK_CTX},
+
/* Pstorm */
{'P', BLOCK_PSEM,
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
- PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
- PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+ PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
+ PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
PCM_REG_CTX_RBC_ACCS,
0, 0,
10, PCM_REG_SM_CON_CTX,
@@ -498,6 +576,7 @@ static struct storm_defs s_storm_defs[] = {
};
/* Block definitions array */
+
static struct block_defs block_grc_defs = {
"grc",
{true, true}, false, 0,
@@ -587,9 +666,11 @@ static struct block_defs block_pcie_defs = {
"pcie",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
- PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
- PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ PCIE_REG_DBG_COMMON_SELECT_K2,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
+ PCIE_REG_DBG_COMMON_SHIFT_K2,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -691,9 +772,9 @@ static struct block_defs block_pglcs_defs = {
"pglcs",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
- PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
- PGLCS_REG_DBG_FORCE_FRAME,
+ PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
+ PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
+ PGLCS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 2
};
@@ -991,10 +1072,11 @@ static struct block_defs block_yuld_defs = {
"yuld",
{true, true}, false, 0,
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
- YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
- YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
- YULD_REG_DBG_FORCE_FRAME,
- true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+ YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
+ YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
+ YULD_REG_DBG_FORCE_FRAME_BB_K2,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ 15
};
static struct block_defs block_xyld_defs = {
@@ -1143,9 +1225,9 @@ static struct block_defs block_umac_defs = {
"umac",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
- UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
- UMAC_REG_DBG_FORCE_FRAME,
+ UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
+ UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
+ UMAC_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 6
};
@@ -1177,9 +1259,9 @@ static struct block_defs block_wol_defs = {
"wol",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
- WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
- WOL_REG_DBG_FORCE_FRAME,
+ WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
+ WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
+ WOL_REG_DBG_FORCE_FRAME_K2,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
};
@@ -1187,9 +1269,9 @@ static struct block_defs block_bmbn_defs = {
"bmbn",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
- BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
- BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
- BMBN_REG_DBG_FORCE_FRAME,
+ BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
+ BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
+ BMBN_REG_DBG_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1204,9 +1286,9 @@ static struct block_defs block_nwm_defs = {
"nwm",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
- NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
- NWM_REG_DBG_FORCE_FRAME,
+ NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
+ NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
+ NWM_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
};
@@ -1214,9 +1296,9 @@ static struct block_defs block_nws_defs = {
"nws",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
- NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
- NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
- NWS_REG_DBG_FORCE_FRAME,
+ NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
+ NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
+ NWS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
@@ -1224,9 +1306,9 @@ static struct block_defs block_ms_defs = {
"ms",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
- MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
- MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
- MS_REG_DBG_FORCE_FRAME,
+ MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
+ MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
+ MS_REG_DBG_FORCE_FRAME_K2,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
@@ -1234,9 +1316,11 @@ static struct block_defs block_phy_pcie_defs = {
"phy_pcie",
{false, true}, false, 0,
{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
- PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
- PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
- PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ PCIE_REG_DBG_COMMON_SELECT_K2,
+ PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
+ PCIE_REG_DBG_COMMON_SHIFT_K2,
+ PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1261,6 +1345,13 @@ static struct block_defs block_rgfs_defs = {
false, false, MAX_DBG_RESET_REGS, 0
};
+static struct block_defs block_rgsrc_defs = {
+ "rgsrc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -1268,6 +1359,13 @@ static struct block_defs block_tgfs_defs = {
false, false, MAX_DBG_RESET_REGS, 0
};
+static struct block_defs block_tgsrc_defs = {
+ "tgsrc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -1350,6 +1448,8 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_muld_defs,
&block_yuld_defs,
&block_xyld_defs,
+ &block_ptld_defs,
+ &block_ypld_defs,
&block_prm_defs,
&block_pbf_pb1_defs,
&block_pbf_pb2_defs,
@@ -1363,6 +1463,10 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_tcfc_defs,
&block_igu_defs,
&block_cau_defs,
+ &block_rgfs_defs,
+ &block_rgsrc_defs,
+ &block_tgfs_defs,
+ &block_tgsrc_defs,
&block_umac_defs,
&block_xmac_defs,
&block_dbg_defs,
@@ -1376,10 +1480,6 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_phy_pcie_defs,
&block_led_defs,
&block_avs_wrap_defs,
- &block_rgfs_defs,
- &block_tgfs_defs,
- &block_ptld_defs,
- &block_ypld_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
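
Note: s_block_defs[] is indexed by enum block_id, so the rgfs/rgsrc/tgfs/tgsrc and ptld/ypld pointers have to sit wherever those IDs land in the regenerated enum; a lookup is a plain index (sketch):

static struct block_defs *ex_get_block(enum block_id id)
{
	return s_block_defs[id];	/* order must match enum block_id */
}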
@@ -1392,66 +1492,151 @@ static struct platform_defs s_platform_defs[] = {
};
static struct grc_param_defs s_grc_param_defs[] = {
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
- {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1}, 0, 1, false, 1, 1},
+
+ /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_VFC */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_IGU */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_BMB */
+ {{0, 0}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_UNSTALL */
+ {{0, 0}, 0, 1, false, 0, 0},
+
+ /* DBG_GRC_PARAM_NUM_LCIDS */
{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
- MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
+ MAX_LCIDS},
+
+ /* DBG_GRC_PARAM_NUM_LTIDS */
{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
- MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
- {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
- {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
- {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
- {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
+ MAX_LTIDS},
+
+ /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0}, 0, 1, true, 0, 0},
+
+ /* DBG_GRC_PARAM_CRASH */
+ {{0, 0}, 0, 1, true, 0, 0},
+
+ /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{0, 0}, 0, 1, false, 1, 0},
+
+ /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_DUMP_PHY */
+ {{1, 1}, 0, 1, false, 0, 1},
+
+ /* DBG_GRC_PARAM_NO_MCP */
+ {{0, 0}, 0, 1, false, 0, 0},
+
+ /* DBG_GRC_PARAM_NO_FW_VER */
+ {{0, 0}, 0, 1, false, 0, 0}
};
static struct rss_mem_defs s_rss_mem_defs[] = {
{ "rss_mem_cid", "rss_cid", 0,
{256, 320},
{32, 32} },
+
{ "rss_mem_key_msb", "rss_key", 1024,
{128, 208},
{256, 256} },
+
{ "rss_mem_key_lsb", "rss_key", 2048,
{128, 208},
{64, 64} },
+
{ "rss_mem_info", "rss_info", 3072,
{128, 208},
{16, 16} },
+
{ "rss_mem_ind", "rss_ind", 4096,
- {(128 * 128), (128 * 208)},
+ {16384, 26624},
{16, 16} }
};
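
Note: the rss_mem_ind entry counts are unchanged in value, only in spelling: 128 * 128 = 16384 and 128 * 208 = 26624, so the products were folded into plain literals.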
@@ -1466,50 +1651,71 @@ static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
{4800, 5632} },
+
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
{2880, 3680} },
+
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
{1152, 1152} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
+ /* DBG_RESET_REG_MISCS_PL_UA */
{ MISCS_REG_RESET_PL_UA, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISCS_PL_HV */
{ MISCS_REG_RESET_PL_HV, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
- { MISCS_REG_RESET_PL_HV_2, 0x0,
- {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ { MISCS_REG_RESET_PL_HV_2_K2, 0x0,
+ {false, true} },
+
+ /* DBG_RESET_REG_MISC_PL_UA */
{ MISC_REG_RESET_PL_UA, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_HV */
{ MISC_REG_RESET_PL_HV, 0x0,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ {true, true} },
+
+ /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+ {true, true} },
};
static struct phy_defs s_phy_defs[] = {
- {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
- {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
- {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
- {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+ {"nw_phy", NWS_REG_NWS_CMU_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+ {"sgmii_phy", MS_REG_MS_CMU_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
/**************************** Private Functions ******************************/
@@ -1556,7 +1762,7 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
dev_data->chip_id = CHIP_K2;
dev_data->mode_enable[MODE_K2] = 1;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
- dev_data->chip_id = CHIP_BB_B0;
+ dev_data->chip_id = CHIP_BB;
dev_data->mode_enable[MODE_BB] = 1;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
@@ -1569,9 +1775,20 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
qed_dbg_grc_init_params(p_hwfn);
dev_data->initialized = true;
+
return DBG_STATUS_OK;
}
+static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
+ MAX_CHIP_IDS +
+ dev_data->chip_id];
+}
+
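Note: dbg_bus_blocks is a flattened [MAX_BLOCK_ID][MAX_CHIP_IDS] table, so the descriptor index is block_id * MAX_CHIP_IDS + chip_id; with MAX_CHIP_IDS == 2, for example, block 5 on chip 1 lands at index 5 * 2 + 1 = 11.
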
/* Reads the FW info structure for the specified Storm from the chip,
* and writes it to the specified fw_info pointer.
*/
@@ -1579,25 +1796,28 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 storm_id, struct fw_info *fw_info)
{
- /* Read first the address that points to fw_info location.
- * The address is located in the last line of the Storm RAM.
- */
- u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_INT_RAM +
- DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
- sizeof(struct fw_info_location);
+ struct storm_defs *storm = &s_storm_defs[storm_id];
struct fw_info_location fw_info_location;
- u32 *dest = (u32 *)&fw_info_location;
- u32 i;
+ u32 addr, i, *dest;
memset(&fw_info_location, 0, sizeof(fw_info_location));
memset(fw_info, 0, sizeof(*fw_info));
+
+ /* Read first the address that points to fw_info location.
+ * The address is located in the last line of the Storm RAM.
+ */
+ addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(fw_info_location);
+ dest = (u32 *)&fw_info_location;
+
for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+
+ /* Read FW version info from Storm RAM */
if (fw_info_location.size > 0 && fw_info_location.size <=
sizeof(*fw_info)) {
- /* Read FW version info from Storm RAM */
addr = fw_info_location.grc_addr;
dest = (u32 *)fw_info;
for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
@@ -1606,27 +1826,30 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
}
}
-/* Dumps the specified string to the specified buffer. Returns the dumped size
- * in bytes (actual length + 1 for the null character termination).
+/* Dumps the specified string to the specified buffer.
+ * Returns the dumped size in bytes.
*/
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
if (dump)
strcpy(dump_buf, str);
+
return (u32)strlen(str) + 1;
}
-/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
- * in bytes.
+/* Dumps zeros to align the specified buffer to dwords.
+ * Returns the dumped size in bytes.
*/
static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
{
- u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
+ u8 offset_in_dword, align_size;
+ offset_in_dword = (u8)(byte_offset & 0x3);
align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
if (dump && align_size)
memset(dump_buf, 0, align_size);
+
return align_size;
}
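
Note: e.g. a byte_offset of 5 gives offset_in_dword == 1 and thus align_size == 3 zero bytes, padding the buffer to the next dword boundary; an already-aligned offset pads nothing.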
@@ -1653,6 +1876,7 @@ static u32 qed_dump_str_param(u32 *dump_buf,
/* Align buffer to next dword */
offset += qed_dump_align(char_buf + offset, dump, offset);
+
return BYTES_TO_DWORDS(offset);
}
@@ -1681,6 +1905,7 @@ static u32 qed_dump_num_param(u32 *dump_buf,
if (dump)
*(dump_buf + offset) = param_val;
offset++;
+
return offset;
}
@@ -1695,7 +1920,6 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
struct fw_info fw_info = { {0}, {0} };
- int printed_chars;
u32 offset = 0;
if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
@@ -1705,37 +1929,32 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
storm_id++) {
- /* Read FW version/image */
- if (!dev_data->block_in_reset
- [s_storm_defs[storm_id].block_id]) {
- /* read FW info for the current Storm */
- qed_read_fw_info(p_hwfn,
- p_ptt, storm_id, &fw_info);
-
- /* Create FW version/image strings */
- printed_chars =
- snprintf(fw_ver_str,
- sizeof(fw_ver_str),
- "%d_%d_%d_%d",
- fw_info.ver.num.major,
- fw_info.ver.num.minor,
- fw_info.ver.num.rev,
- fw_info.ver.num.eng);
- if (printed_chars < 0 || printed_chars >=
- sizeof(fw_ver_str))
- DP_NOTICE(p_hwfn,
- "Unexpected debug error: invalid FW version string\n");
- switch (fw_info.ver.image_id) {
- case FW_IMG_MAIN:
- strcpy(fw_img_str, "main");
- break;
- default:
- strcpy(fw_img_str, "unknown");
- break;
- }
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+
+ /* Read FW version/image */
+ if (dev_data->block_in_reset[storm->block_id])
+ continue;
- found = true;
+ /* Read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+ /* Create FW version/image strings */
+ if (snprintf(fw_ver_str, sizeof(fw_ver_str),
+ "%d_%d_%d_%d", fw_info.ver.num.major,
+ fw_info.ver.num.minor, fw_info.ver.num.rev,
+ fw_info.ver.num.eng) < 0)
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
}
+
+ found = true;
}
}
@@ -1747,6 +1966,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"fw-timestamp", fw_info.ver.timestamp);
+
return offset;
}
@@ -1759,17 +1979,18 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
{
char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
- if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
+ if (dump &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
u32 global_section_offsize, global_section_addr, mfw_ver;
u32 public_data_addr, global_section_offsize_addr;
- int printed_chars;
- /* Find MCP public data GRC address.
- * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
+ /* Find MCP public data GRC address. Needs to be ORed with
+ * MCP_REG_SCRATCH due to a HW bug.
*/
- public_data_addr = qed_rd(p_hwfn, p_ptt,
+ public_data_addr = qed_rd(p_hwfn,
+ p_ptt,
MISC_REG_SHARED_MEM_ADDR) |
- MCP_REG_SCRATCH;
+ MCP_REG_SCRATCH;
/* Find MCP public global section offset */
global_section_offsize_addr = public_data_addr +
@@ -1778,9 +1999,9 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
sizeof(offsize_t) * PUBLIC_GLOBAL;
global_section_offsize = qed_rd(p_hwfn, p_ptt,
global_section_offsize_addr);
- global_section_addr = MCP_REG_SCRATCH +
- (global_section_offsize &
- OFFSIZE_OFFSET_MASK) * 4;
+ global_section_addr =
+ MCP_REG_SCRATCH +
+ (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
/* Read MFW version from MCP public global section */
mfw_ver = qed_rd(p_hwfn, p_ptt,
@@ -1788,13 +2009,9 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
offsetof(struct public_global, mfw_ver));
/* Dump MFW version param */
- printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
- "%d_%d_%d_%d",
- (u8) (mfw_ver >> 24),
- (u8) (mfw_ver >> 16),
- (u8) (mfw_ver >> 8),
- (u8) mfw_ver);
- if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
+ if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
+ (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
+ (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid MFW version string\n");
}
@@ -1820,11 +2037,12 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
bool dump,
u8 num_specific_global_params)
{
- u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0;
+ u8 num_params;
- /* Find platform string and dump global params section header */
+ /* Dump global params section header */
+ num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "global_params", num_params);
@@ -1846,25 +2064,29 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
offset +=
qed_dump_num_param(dump_buf + offset, dump, "pci-func",
p_hwfn->abs_pf_id);
+
return offset;
}
-/* Writes the last section to the specified buffer at the given offset.
- * Returns the dumped size in dwords.
+/* Writes the "last" section (including CRC) to the specified buffer at the
+ * given offset. Returns the dumped size in dwords.
*/
-static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf, u32 offset, bool dump)
{
- u32 start_offset = offset, crc = ~0;
+ u32 start_offset = offset;
/* Dump CRC section header */
offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
- /* Calculate CRC32 and add it to the dword following the "last" section.
- */
+ /* Calculate CRC32 and add it to the dword after the "last" section */
if (dump)
- *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
+ *(dump_buf + offset) = ~crc32(0xffffffff,
+ (u8 *)dump_buf,
DWORDS_TO_BYTES(offset));
+
offset++;
+
return offset - start_offset;
}
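
Note: the literal 0xffffffff is the same seed the removed code spelled as ~0, and together with the final bitwise NOT it is the conventional CRC-32 init/finalize pair, so the trailing dword can be re-checked with the same call. A sketch, assuming num_dwords counts everything including the CRC dword itself:

/* Sketch: verify the CRC dword appended by qed_dump_last_section() */
static bool ex_dump_crc_ok(const u32 *dump_buf, u32 num_dwords)
{
	u32 payload = num_dwords - 1;	/* all dwords before the CRC */

	return dump_buf[payload] ==
	       ~crc32(0xffffffff, (const u8 *)dump_buf,
		      DWORDS_TO_BYTES(payload));
}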
@@ -1883,11 +2105,12 @@ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
p_ptt, s_reset_regs_defs[i].addr);
/* Check if blocks are in reset */
- for (i = 0; i < MAX_BLOCK_ID; i++)
- dev_data->block_in_reset[i] =
- s_block_defs[i]->has_reset_bit &&
- !(reg_val[s_block_defs[i]->reset_reg] &
- BIT(s_block_defs[i]->reset_bit_offset));
+ for (i = 0; i < MAX_BLOCK_ID; i++) {
+ struct block_defs *block = s_block_defs[i];
+
+ dev_data->block_in_reset[i] = block->has_reset_bit &&
+ !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
+ }
}
/* Enable / disable the Debug block */
@@ -1902,12 +2125,12 @@ static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+ struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
- dbg_reset_reg_addr =
- s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
+ dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
- new_reset_reg_val = old_reset_reg_val &
- ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
+ new_reset_reg_val =
+ old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
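
Note: the two back-to-back writes form a reset pulse: clearing the DBG block's bit puts it into reset (a clear bit means "in reset", per the block_in_reset computation above), and restoring the old register value releases it again.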
@@ -1920,8 +2143,8 @@ static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
}
-/* Enable / disable Debug Bus clients according to the specified mask.
- * (1 = enable, 0 = disable)
+/* Enable / disable Debug Bus clients according to the specified mask
+ * (1 = enable, 0 = disable).
*/
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 client_mask)
@@ -1931,10 +2154,14 @@ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
- const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
bool arg1, arg2;
+ const u32 *ptr;
+ u8 tree_val;
+
+ /* Get next element from modes tree buffer */
+ ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
@@ -1974,75 +2201,81 @@ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
enum block_id block_id, u8 mem_group_id)
{
+ struct block_defs *block = s_block_defs[block_id];
u8 i;
/* Check Storm match */
- if (s_block_defs[block_id]->associated_to_storm &&
+ if (block->associated_to_storm &&
!qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)s_block_defs[block_id]->storm_id))
+ (enum dbg_storms)block->storm_id))
return false;
- for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
- if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
- mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
- return qed_grc_is_included(p_hwfn,
- s_big_ram_defs[i].grc_param);
- if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
- MEM_GROUP_PXP_MEM)
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
+ struct big_ram_defs *big_ram = &s_big_ram_defs[i];
+
+ if (mem_group_id == big_ram->mem_group_id ||
+ mem_group_id == big_ram->ram_mem_group_id)
+ return qed_grc_is_included(p_hwfn, big_ram->grc_param);
+ }
+
+ switch (mem_group_id) {
+ case MEM_GROUP_PXP_ILT:
+ case MEM_GROUP_PXP_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
- if (mem_group_id == MEM_GROUP_RAM)
+ case MEM_GROUP_RAM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
- if (mem_group_id == MEM_GROUP_PBUF)
+ case MEM_GROUP_PBUF:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
- if (mem_group_id == MEM_GROUP_CAU_MEM ||
- mem_group_id == MEM_GROUP_CAU_SB ||
- mem_group_id == MEM_GROUP_CAU_PI)
+ case MEM_GROUP_CAU_MEM:
+ case MEM_GROUP_CAU_SB:
+ case MEM_GROUP_CAU_PI:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
- if (mem_group_id == MEM_GROUP_QM_MEM)
+ case MEM_GROUP_QM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
- mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ case MEM_GROUP_CFC_MEM:
+ case MEM_GROUP_CONN_CFC_MEM:
+ case MEM_GROUP_TASK_CFC_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
- if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
- MEM_GROUP_IGU_MSIX)
+ case MEM_GROUP_IGU_MEM:
+ case MEM_GROUP_IGU_MSIX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
- if (mem_group_id == MEM_GROUP_MULD_MEM)
+ case MEM_GROUP_MULD_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
- if (mem_group_id == MEM_GROUP_PRS_MEM)
+ case MEM_GROUP_PRS_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
- if (mem_group_id == MEM_GROUP_DMAE_MEM)
+ case MEM_GROUP_DMAE_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
- if (mem_group_id == MEM_GROUP_TM_MEM)
+ case MEM_GROUP_TM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
- if (mem_group_id == MEM_GROUP_SDM_MEM)
+ case MEM_GROUP_SDM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
- if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
- MEM_GROUP_RDIF_CTX)
+ case MEM_GROUP_TDIF_CTX:
+ case MEM_GROUP_RDIF_CTX:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
- if (mem_group_id == MEM_GROUP_CM_MEM)
+ case MEM_GROUP_CM_MEM:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
- if (mem_group_id == MEM_GROUP_IOR)
+ case MEM_GROUP_IOR:
return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
-
- return true;
+ default:
+ return true;
+ }
}
/* Stalls all Storms */
static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool stall)
{
- u8 reg_val = stall ? 1 : 0;
+ u32 reg_addr;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id)) {
- u32 reg_addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STALL_0;
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
- qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
- }
+ reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALL_0_BB_K2;
+ qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
}
msleep(STALL_DELAY_MS);
@@ -2054,24 +2287,29 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
- u32 i;
+ u32 block_id, i;
/* Fill reset regs values */
- for (i = 0; i < MAX_BLOCK_ID; i++)
- if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
- reg_val[s_block_defs[i]->reset_reg] |=
- BIT(s_block_defs[i]->reset_bit_offset);
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ struct block_defs *block = s_block_defs[block_id];
+
+ if (block->has_reset_bit && block->unreset)
+ reg_val[block->reset_reg] |=
+ BIT(block->reset_bit_offset);
+ }
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
- reg_val[i] |= s_reset_regs_defs[i].unreset_val;
- if (reg_val[i])
- qed_wr(p_hwfn,
- p_ptt,
- s_reset_regs_defs[i].addr +
- RESET_REG_UNRESET_OFFSET, reg_val[i]);
- }
+ if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ continue;
+
+ reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+
+ if (reg_val[i])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_reset_regs_defs[i].addr +
+ RESET_REG_UNRESET_OFFSET, reg_val[i]);
}
}
@@ -2095,6 +2333,7 @@ qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
qed_get_block_attn_data(block_id, attn_type);
*num_attn_regs = block_type_data->num_regs;
+
return &((const struct dbg_attn_reg *)
s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
regs_offset];
@@ -2105,34 +2344,34 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ const struct dbg_attn_reg *attn_reg_arr;
u8 reg_idx, num_attn_regs;
u32 block_id;
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- const struct dbg_attn_reg *attn_reg_arr;
-
if (dev_data->block_in_reset[block_id])
continue;
attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
+
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
const struct dbg_attn_reg *reg_data =
&attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
/* Check mode */
- bool eval_mode = GET_FIELD(reg_data->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- u16 modes_buf_offset =
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
+ /* If the mode matches, clear the parity status (read-to-clear) */
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset))
- /* Mode match - read parity status read-clear
- * register.
- */
qed_rd(p_hwfn, p_ptt,
DWORDS_TO_BYTES(reg_data->
sts_clr_address));
@@ -2142,11 +2381,11 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* The following parameters are dumped:
- * - 'count' = num_dumped_entries
- * - 'split' = split_type
- * - 'id' = split_id (dumped only if split_id >= 0)
- * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
- * param_val != NULL)
+ * - count: no. of dumped entries
+ * - split: split type
+ * - id: split ID (dumped only if split_id >= 0)
+ * - param_name: user parameter value (dumped only if param_name != NULL
+ * and param_val != NULL).
*/
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
bool dump,
@@ -2170,84 +2409,100 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
if (param_name && param_val)
offset += qed_dump_str_param(dump_buf + offset,
dump, param_name, param_val);
+
return offset;
}
/* Dumps the GRC registers in the specified address range.
* Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
*/
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 len)
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 addr, u32 len, bool wide_bus)
{
u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
- if (dump)
- for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
- else
- offset += len;
+ if (!dump)
+ return len;
+
+ for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+
return offset;
}
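
Note: the new wide_bus flag is only plumbed through at this point; the helper still reads dword-by-dword via qed_rd() regardless of it, so the flag is presumably consumed by a follow-up change (e.g. a wide-bus/DMAE read path).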
-/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
-static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
- u32 len)
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
{
if (dump)
*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+
return 1;
}
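
Note: with REG_DUMP_LEN_SHIFT == 24, the entry header packs a 24-bit dword address in the low bits and an 8-bit length above it; e.g. addr 0x1a2b3 with len 4 encodes as 0x1a2b3 | (4 << 24) = 0x0401a2b3.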
-/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
+/* Dumps GRC registers sequence. Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 len)
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 addr, u32 len, bool wide_bus)
{
u32 offset = 0;
offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
- dump_buf + offset, dump, addr, len);
+ dump_buf + offset,
+ dump, addr, len, wide_bus);
+
return offset;
}
/* Dumps GRC registers sequence with skip cycle.
* Returns the dumped size in dwords.
+ * - addr: start GRC address in dwords
+ * - total_len: total no. of dwords to dump
+ * - read_len: no. consecutive dwords to read
+ * - skip_len: no. of dwords to skip (and fill with zeros)
*/
static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf,
- bool dump, u32 addr, u32 total_len,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 addr,
+ u32 total_len,
u32 read_len, u32 skip_len)
{
u32 offset = 0, reg_offset = 0;
offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
- if (dump) {
- while (reg_offset < total_len) {
- u32 curr_len = min_t(u32,
- read_len,
- total_len - reg_offset);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump, addr, curr_len);
+
+ if (!dump)
+ return offset + total_len;
+
+ while (reg_offset < total_len) {
+ u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
+
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, curr_len, false);
+ reg_offset += curr_len;
+ addr += curr_len;
+
+ if (reg_offset < total_len) {
+ curr_len = min_t(u32, skip_len, total_len - skip_len);
+ memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
+ offset += curr_len;
reg_offset += curr_len;
addr += curr_len;
- if (reg_offset < total_len) {
- curr_len = min_t(u32,
- skip_len,
- total_len - skip_len);
- memset(dump_buf + offset, 0,
- DWORDS_TO_BYTES(curr_len));
- offset += curr_len;
- reg_offset += curr_len;
- addr += curr_len;
- }
}
- } else {
- offset += total_len;
}
return offset;
@@ -2266,43 +2521,48 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
bool mode_match = true;
*num_dumped_reg_entries = 0;
+
while (input_offset < input_regs_arr.size_in_dwords) {
const struct dbg_dump_cond_hdr *cond_hdr =
(const struct dbg_dump_cond_hdr *)
&input_regs_arr.ptr[input_offset++];
- bool eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
+ u16 modes_buf_offset;
+ bool eval_mode;
/* Check mode/block */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
- if (mode_match && block_enable[cond_hdr->block_id]) {
- for (i = 0; i < cond_hdr->data_size;
- i++, input_offset++) {
- const struct dbg_dump_reg *reg =
- (const struct dbg_dump_reg *)
- &input_regs_arr.ptr[input_offset];
- u32 addr, len;
-
- addr = GET_FIELD(reg->data,
- DBG_DUMP_REG_ADDRESS);
- len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
- offset +=
- qed_grc_dump_reg_entry(p_hwfn, p_ptt,
- dump_buf + offset,
- dump,
- addr,
- len);
- (*num_dumped_reg_entries)++;
- }
- } else {
+ if (!mode_match || !block_enable[cond_hdr->block_id]) {
input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
+ const struct dbg_dump_reg *reg =
+ (const struct dbg_dump_reg *)
+ &input_regs_arr.ptr[input_offset];
+ u32 addr, len;
+ bool wide_bus;
+
+ addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
+ len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
+ wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ wide_bus);
+ (*num_dumped_reg_entries)++;
}
}
@@ -2350,8 +2610,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
return num_dumped_reg_entries > 0 ? offset : 0;
}
-/* Dumps registers according to the input registers array.
- * Returns the dumped size in dwords.
+/* Dumps registers according to the input registers array. Returns the dumped
+ * size in dwords.
*/
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2361,29 +2621,37 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct chip_platform_defs *p_platform_defs;
+ struct chip_platform_defs *chip_platform;
u32 offset = 0, input_offset = 0;
- struct chip_defs *p_chip_defs;
+ struct chip_defs *chip;
u8 port_id, pf_id, vf_id;
u16 fid;
- p_chip_defs = &s_chip_defs[dev_data->chip_id];
- p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
+ chip = &s_chip_defs[dev_data->chip_id];
+ chip_platform = &chip->per_platform[dev_data->platform_id];
if (dump)
DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
+
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
- const struct dbg_dump_split_hdr *split_hdr =
+ const struct dbg_dump_split_hdr *split_hdr;
+ struct dbg_array curr_input_regs_arr;
+ u32 split_data_size;
+ u8 split_type_id;
+
+ split_hdr =
(const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
- u8 split_type_id = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- u32 split_data_size = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- struct dbg_array curr_input_regs_arr = {
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
- split_data_size};
+ split_type_id =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_regs_arr.ptr =
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
+ curr_input_regs_arr.size_in_dwords = split_data_size;
switch (split_type_id) {
case SPLIT_TYPE_NONE:
@@ -2398,8 +2666,9 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_name,
param_val);
break;
+
case SPLIT_TYPE_PORT:
- for (port_id = 0; port_id < p_platform_defs->num_ports;
+ for (port_id = 0; port_id < chip_platform->num_ports;
port_id++) {
if (dump)
qed_port_pretend(p_hwfn, p_ptt,
@@ -2414,9 +2683,10 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_val);
}
break;
+
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
- for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
+ for (pf_id = 0; pf_id < chip_platform->num_pfs;
pf_id++) {
u8 pfid_shift =
PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
@@ -2427,17 +2697,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
}
offset +=
- qed_grc_dump_split_data(p_hwfn, p_ptt,
+ qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
curr_input_regs_arr,
dump_buf + offset,
- dump, block_enable,
- "pf", pf_id,
+ dump,
+ block_enable,
+ "pf",
+ pf_id,
param_name,
param_val);
}
break;
+
case SPLIT_TYPE_VF:
- for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+ for (vf_id = 0; vf_id < chip_platform->num_vfs;
vf_id++) {
u8 vfvalid_shift =
PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
@@ -2460,6 +2734,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_val);
}
break;
+
default:
break;
}
@@ -2490,35 +2765,37 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
- if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
- u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+ if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
+ continue;
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- num_regs++;
- }
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS
+ (s_reset_regs_defs[i].addr), 1,
+ false);
+ num_regs++;
}
/* Write header */
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true, num_regs, "eng", -1, NULL, NULL);
+
return offset;
}
-/* Dump registers that are modified during GRC Dump and therefore must be dumped
- * first. Returns the dumped size in dwords.
+/* Dump registers that are modified during GRC Dump and therefore must be
+ * dumped first. Returns the dumped size in dwords.
*/
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, num_reg_entries = 0, block_id;
+ u32 block_id, offset = 0, num_reg_entries = 0;
+ const struct dbg_attn_reg *attn_reg_arr;
u8 storm_id, reg_idx, num_attn_regs;
/* Calculate header size */
@@ -2527,14 +2804,13 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
/* Write parity registers */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- const struct dbg_attn_reg *attn_reg_arr;
-
if (dev_data->block_in_reset[block_id] && dump)
continue;
attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
ATTN_TYPE_PARITY,
&num_attn_regs);
+
for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
const struct dbg_attn_reg *reg_data =
&attn_reg_arr[reg_idx];
@@ -2548,37 +2824,36 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
modes_buf_offset =
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
- if (!eval_mode ||
- qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
- /* Mode match - read and dump registers */
- addr = reg_data->mask_address;
- offset +=
- qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- addr = GET_FIELD(reg_data->data,
- DBG_ATTN_REG_STS_ADDRESS);
- offset +=
- qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- 1);
- num_reg_entries += 2;
- }
+ if (eval_mode &&
+ !qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ continue;
+
+ /* Mode match: read & dump registers */
+ addr = reg_data->mask_address;
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1, false);
+ addr = GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1, false);
+ num_reg_entries += 2;
}
}
- /* Write storm stall status registers */
+ /* Write Storm stall status registers */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 addr;
- if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
- dump)
+ if (dev_data->block_in_reset[storm->block_id] && dump)
continue;
addr =
@@ -2589,7 +2864,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- 1);
+ 1,
+ false);
num_reg_entries++;
}
@@ -2598,6 +2874,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
qed_grc_dump_regs_hdr(dump_buf,
true,
num_reg_entries, "eng", -1, NULL, NULL);
+
return offset;
}
@@ -2637,17 +2914,17 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Dumps a GRC memory header (section and params).
- * The following parameters are dumped:
- * name - name is dumped only if it's not NULL.
- * addr - addr is dumped only if name is NULL.
- * len - len is always dumped.
- * width - bit_width is dumped if it's not zero.
- * packed - packed=1 is dumped if it's not false.
- * mem_group - mem_group is always dumped.
- * is_storm - true only if the memory is related to a Storm.
- * storm_letter - storm letter (valid only if is_storm is true).
- * Returns the dumped size in dwords.
+/* Dumps a GRC memory header (section and params). Returns the dumped size in
+ * dwords. The following parameters are dumped:
+ * - name: dumped only if it's not NULL.
+ * - addr: in dwords, dumped only if name is NULL.
+ * - len: in dwords, always dumped.
+ * - width: dumped if it's not zero.
+ * - packed: dumped only if it's not false.
+ * - mem_group: always dumped.
+ * - is_storm: true only if the memory is related to a Storm.
+ * - storm_letter: valid only if is_storm is true.
*/
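+/* Editor's illustrative sketch (hypothetical values, not part of the
+ * original patch): a typical by-address call would look like:
+ *
+ *	offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump,
+ *				       NULL, addr, 128, 32, false,
+ *				       "pxp_mem", false, 0);
+ *
+ * Because name is NULL here, the header records the address (converted to
+ * bytes via DWORDS_TO_BYTES) instead of a name.
+ */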
static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
@@ -2667,6 +2944,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
if (!len)
DP_NOTICE(p_hwfn,
"Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+
if (bit_width)
num_params++;
if (packed)
@@ -2675,6 +2953,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
/* Dump section header */
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "grc_mem", num_params);
+
if (name) {
/* Dump name */
if (is_storm) {
@@ -2694,14 +2973,15 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
len, buf);
} else {
/* Dump address */
+ u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
+
offset += qed_dump_num_param(dump_buf + offset,
- dump, "addr",
- DWORDS_TO_BYTES(addr));
+ dump, "addr", addr_in_bytes);
if (dump && len > 64)
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from address 0x%x...\n",
- len, (u32)DWORDS_TO_BYTES(addr));
+ len, addr_in_bytes);
}
/* Dump len */
@@ -2727,11 +3007,13 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
}
offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+
return offset;
}
/* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
* Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
*/
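+/* Editor's note (illustrative, not part of the original patch): callers
+ * convert byte-based GRC addresses with BYTES_TO_DWORDS(), e.g. the MCP
+ * scratchpad dump later in this file passes BYTES_TO_DWORDS(MCP_REG_SCRATCH)
+ * with a length of MCP_REG_SCRATCH_SIZE dwords and wide_bus = false.
+ */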
static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2740,6 +3022,7 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
const char *name,
u32 addr,
u32 len,
+ bool wide_bus,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2758,7 +3041,9 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
mem_group, is_storm, storm_letter);
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
- dump_buf + offset, dump, addr, len);
+ dump_buf + offset,
+ dump, addr, len, wide_bus);
+
return offset;
}
@@ -2773,20 +3058,21 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
while (input_offset < input_mems_arr.size_in_dwords) {
const struct dbg_dump_cond_hdr *cond_hdr;
+ u16 modes_buf_offset;
u32 num_entries;
bool eval_mode;
cond_hdr = (const struct dbg_dump_cond_hdr *)
&input_mems_arr.ptr[input_offset++];
- eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
+ num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
/* Check required mode */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
-
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
@@ -2796,81 +3082,87 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
continue;
}
- num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
for (i = 0; i < num_entries;
i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
const struct dbg_dump_mem *mem =
(const struct dbg_dump_mem *)
&input_mems_arr.ptr[input_offset];
- u8 mem_group_id;
+ u8 mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
+ bool is_storm = false, mem_wide_bus;
+ enum dbg_grc_params grc_param;
+ char storm_letter = 'a';
+ enum block_id block_id;
+ u32 mem_addr, mem_len;
- mem_group_id = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_MEM_GROUP_ID);
if (mem_group_id >= MEM_GROUPS_NUM) {
DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
return 0;
}
- if (qed_grc_is_mem_included(p_hwfn,
- (enum block_id)cond_hdr->block_id,
- mem_group_id)) {
- u32 mem_addr = GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_ADDRESS);
- u32 mem_len = GET_FIELD(mem->dword1,
- DBG_DUMP_MEM_LENGTH);
- enum dbg_grc_params grc_param;
- char storm_letter = 'a';
- bool is_storm = false;
-
- /* Update memory length for CCFC/TCFC memories
- * according to number of LCIDs/LTIDs.
- */
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
- if (mem_len % MAX_LCIDS != 0) {
- DP_NOTICE(p_hwfn,
- "Invalid CCFC connection memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LCIDS;
- mem_len = qed_grc_get_param(p_hwfn,
- grc_param) *
- (mem_len / MAX_LCIDS);
- } else if (mem_group_id ==
- MEM_GROUP_TASK_CFC_MEM) {
- if (mem_len % MAX_LTIDS != 0) {
- DP_NOTICE(p_hwfn,
- "Invalid TCFC task memory size\n");
- return 0;
- }
-
- grc_param = DBG_GRC_PARAM_NUM_LTIDS;
- mem_len = qed_grc_get_param(p_hwfn,
- grc_param) *
- (mem_len / MAX_LTIDS);
+ block_id = (enum block_id)cond_hdr->block_id;
+ if (!qed_grc_is_mem_included(p_hwfn,
+ block_id,
+ mem_group_id))
+ continue;
+
+ mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
+ mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
+ mem_wide_bus = GET_FIELD(mem->dword1,
+ DBG_DUMP_MEM_WIDE_BUS);
+
+ /* Update memory length for CCFC/TCFC memories
+ * according to number of LCIDs/LTIDs.
+ */
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+ if (mem_len % MAX_LCIDS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid CCFC connection memory size\n");
+ return 0;
}
- /* If memory is associated with Storm, update
- * Storm details.
- */
- if (s_block_defs[cond_hdr->block_id]->
- associated_to_storm) {
- is_storm = true;
- storm_letter =
- s_storm_defs[s_block_defs[
- cond_hdr->block_id]->
- storm_id].letter;
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
+ mem_len = qed_grc_get_param(p_hwfn, grc_param) *
+ (mem_len / MAX_LCIDS);
+ } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
+ if (mem_len % MAX_LTIDS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid TCFC task memory size\n");
+ return 0;
}
- /* Dump memory */
- offset += qed_grc_dump_mem(p_hwfn, p_ptt,
- dump_buf + offset, dump, NULL,
- mem_addr, mem_len, 0,
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
+ mem_len = qed_grc_get_param(p_hwfn, grc_param) *
+ (mem_len / MAX_LTIDS);
+ }
+
+ /* If memory is associated with Storm, update Storm
+ * details.
+ */
+			if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
+				is_storm = true;
+				storm_letter =
+				    s_storm_defs[s_block_defs[cond_hdr->block_id]->
+						 storm_id].letter;
+ }
+
+ /* Dump memory */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ mem_addr,
+ mem_len,
+ mem_wide_bus,
+ 0,
false,
s_mem_group_names[mem_group_id],
- is_storm, storm_letter);
- }
- }
+ is_storm,
+ storm_letter);
+ }
}
return offset;
@@ -2887,16 +3179,22 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
- const struct dbg_dump_split_hdr *split_hdr =
- (const struct dbg_dump_split_hdr *)
+ const struct dbg_dump_split_hdr *split_hdr;
+ struct dbg_array curr_input_mems_arr;
+ u32 split_data_size;
+ u8 split_type_id;
+
+ split_hdr = (const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
- u8 split_type_id = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
- u32 split_data_size = GET_FIELD(split_hdr->hdr,
- DBG_DUMP_SPLIT_HDR_DATA_SIZE);
- struct dbg_array curr_input_mems_arr = {
- &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
- split_data_size};
+ split_type_id =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_mems_arr.ptr =
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
+ curr_input_mems_arr.size_in_dwords = split_data_size;
switch (split_type_id) {
case SPLIT_TYPE_NONE:
@@ -2906,6 +3204,7 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump);
break;
+
default:
DP_NOTICE(p_hwfn,
"Dumping split memories is currently not supported\n");
@@ -2920,6 +3219,7 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
/* Dumps GRC context data for the specified Storm.
* Returns the dumped size in dwords.
+ * The lid_size argument is specified in quad-regs.
*/
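+/* Editor's worked example (hypothetical values, not part of the original
+ * patch): lid_size arrives in quad-regs and is scaled below by
+ * BYTES_IN_DWORD, so lid_size = 2 and num_lids = 320 would yield
+ * total_size = 320 * (2 * 4) = 2560 dwords.
+ */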
static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -2931,13 +3231,15 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
u32 rd_reg_addr,
u8 storm_id)
{
- u32 i, lid, total_size;
- u32 offset = 0;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ u32 i, lid, total_size, offset = 0;
if (!lid_size)
return 0;
+
lid_size *= BYTES_IN_DWORD;
total_size = num_lids * lid_size;
+
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
@@ -2945,25 +3247,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
0,
total_size,
lid_size * 32,
- false,
- name,
- true, s_storm_defs[storm_id].letter);
+ false, name, true, storm->letter);
+
+ if (!dump)
+ return offset + total_size;
/* Dump context data */
- if (dump) {
- for (lid = 0; lid < num_lids; lid++) {
- for (i = 0; i < lid_size; i++, offset++) {
- qed_wr(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].cm_ctx_wr_addr,
- BIT(9) | lid);
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt,
- rd_reg_addr);
- }
+ for (lid = 0; lid < num_lids; lid++) {
+ for (i = 0; i < lid_size; i++, offset++) {
+ qed_wr(p_hwfn,
+ p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt, rd_reg_addr);
}
- } else {
- offset += total_size;
}
return offset;
@@ -2973,15 +3269,19 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
+ enum dbg_grc_params grc_param;
u32 offset = 0;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+
if (!qed_grc_is_storm_included(p_hwfn,
(enum dbg_storms)storm_id))
continue;
/* Dump Conn AG context size */
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -2989,14 +3289,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
dump,
"CONN_AG_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS),
- s_storm_defs[storm_id].
- cm_conn_ag_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_conn_ag_ctx_rd_addr,
+ grc_param),
+ storm->cm_conn_ag_ctx_lid_size,
+ storm->cm_conn_ag_ctx_rd_addr,
storm_id);
/* Dump Conn ST context size */
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3004,14 +3303,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
dump,
"CONN_ST_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS),
- s_storm_defs[storm_id].
- cm_conn_st_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_conn_st_ctx_rd_addr,
+ grc_param),
+ storm->cm_conn_st_ctx_lid_size,
+ storm->cm_conn_st_ctx_rd_addr,
storm_id);
/* Dump Task AG context size */
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3019,14 +3317,13 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
dump,
"TASK_AG_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS),
- s_storm_defs[storm_id].
- cm_task_ag_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_task_ag_ctx_rd_addr,
+ grc_param),
+ storm->cm_task_ag_ctx_lid_size,
+ storm->cm_task_ag_ctx_rd_addr,
storm_id);
/* Dump Task ST context size */
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
offset +=
qed_grc_dump_ctx_data(p_hwfn,
p_ptt,
@@ -3034,11 +3331,9 @@ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
dump,
"TASK_ST_CTX",
qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS),
- s_storm_defs[storm_id].
- cm_task_st_ctx_lid_size,
- s_storm_defs[storm_id].
- cm_task_st_ctx_rd_addr,
+ grc_param),
+ storm->cm_task_st_ctx_lid_size,
+ storm->cm_task_st_ctx_rd_addr,
storm_id);
}
@@ -3050,8 +3345,8 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
char buf[10] = "IOR_SET_?";
+ u32 addr, offset = 0;
u8 storm_id, set_id;
- u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
struct storm_defs *storm = &s_storm_defs[storm_id];
@@ -3061,11 +3356,9 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
continue;
for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- u32 dwords, addr;
-
- dwords = storm->sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE;
- addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+ addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE) +
+ IOR_SET_OFFSET(set_id);
buf[strlen(buf) - 1] = '0' + set_id;
offset += qed_grc_dump_mem(p_hwfn,
p_ptt,
@@ -3074,6 +3367,7 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
buf,
addr,
IORS_PER_SET,
+ false,
32,
false,
"ior",
@@ -3091,10 +3385,10 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 storm_id)
{
u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
- u32 offset = 0;
- u32 row, i;
+ u32 row, i, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3103,38 +3397,34 @@ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
0,
total_size,
256,
- false,
- "vfc_cam",
- true, s_storm_defs[storm_id].letter);
- if (dump) {
- /* Prepare CAM address */
- SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
- for (row = 0; row < VFC_CAM_NUM_ROWS;
- row++, offset += VFC_CAM_RESP_DWORDS) {
- /* Write VFC CAM command */
- SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
- ARR_REG_WR(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_WR,
- cam_cmd, VFC_CAM_CMD_DWORDS);
+ false, "vfc_cam", true, storm->letter);
- /* Write VFC CAM address */
- ARR_REG_WR(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_ADDR,
- cam_addr, VFC_CAM_ADDR_DWORDS);
+ if (!dump)
+ return offset + total_size;
- /* Read VFC CAM read response */
- ARR_REG_RD(p_hwfn,
- p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_RD,
- dump_buf + offset, VFC_CAM_RESP_DWORDS);
- }
- } else {
- offset += total_size;
+ /* Prepare CAM address */
+ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+
+ for (row = 0; row < VFC_CAM_NUM_ROWS;
+ row++, offset += VFC_CAM_RESP_DWORDS) {
+ /* Write VFC CAM command */
+ SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
+ cam_cmd, VFC_CAM_CMD_DWORDS);
+
+ /* Write VFC CAM address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
+ cam_addr, VFC_CAM_ADDR_DWORDS);
+
+ /* Read VFC CAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_CAM_RESP_DWORDS);
}
return offset;
@@ -3148,10 +3438,10 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
u8 storm_id, struct vfc_ram_defs *ram_defs)
{
u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
- u32 offset = 0;
- u32 row, i;
+ u32 row, i, offset = 0;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3162,7 +3452,7 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
256,
false,
ram_defs->type_name,
- true, s_storm_defs[storm_id].letter);
+ true, storm->letter);
/* Prepare RAM address */
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
@@ -3176,23 +3466,20 @@ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
/* Write VFC RAM command */
ARR_REG_WR(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_WR,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
ram_cmd, VFC_RAM_CMD_DWORDS);
/* Write VFC RAM address */
SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
ARR_REG_WR(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_ADDR,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
ram_addr, VFC_RAM_ADDR_DWORDS);
/* Read VFC RAM read response */
ARR_REG_RD(p_hwfn,
p_ptt,
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_VFC_DATA_RD,
+ storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
dump_buf + offset, VFC_RAM_RESP_DWORDS);
}
@@ -3208,28 +3495,27 @@ static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id) &&
- s_storm_defs[storm_id].has_vfc &&
- (storm_id != DBG_PSTORM_ID ||
- dev_data->platform_id == PLATFORM_ASIC)) {
- /* Read CAM */
- offset += qed_grc_dump_vfc_cam(p_hwfn,
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id) ||
+ !s_storm_defs[storm_id].has_vfc ||
+		    (storm_id == DBG_PSTORM_ID &&
+		     dev_data->platform_id != PLATFORM_ASIC))
+ continue;
+
+ /* Read CAM */
+ offset += qed_grc_dump_vfc_cam(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, storm_id);
+
+ /* Read RAM */
+ for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+ offset += qed_grc_dump_vfc_ram(p_hwfn,
p_ptt,
dump_buf + offset,
- dump, storm_id);
-
- /* Read RAM */
- for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
- offset += qed_grc_dump_vfc_ram(p_hwfn,
- p_ptt,
- dump_buf +
- offset,
- dump,
- storm_id,
- &s_vfc_ram_defs
- [i]);
- }
+ dump,
+ storm_id,
+ &s_vfc_ram_defs[i]);
}
return offset;
@@ -3244,14 +3530,17 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
u8 rss_mem_id;
for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
- struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
- u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
- u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
- u32 total_dwords = (num_entries * entry_width) / 32;
- u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
- bool packed = (entry_width == 16);
- u32 rss_addr = rss_defs->addr;
- u32 i, addr;
+ u32 rss_addr, num_entries, entry_width, total_dwords, i;
+ struct rss_mem_defs *rss_defs;
+ u32 addr, size;
+ bool packed;
+
+ rss_defs = &s_rss_mem_defs[rss_mem_id];
+ rss_addr = rss_defs->addr;
+ num_entries = rss_defs->num_entries[dev_data->chip_id];
+ entry_width = rss_defs->entry_width[dev_data->chip_id];
+ total_dwords = (num_entries * entry_width) / 32;
+ packed = (entry_width == 16);
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
@@ -3263,23 +3552,23 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
packed,
rss_defs->type_name, false, 0);
+ /* Dump RSS data */
if (!dump) {
offset += total_dwords;
continue;
}
- /* Dump RSS data */
- for (i = 0; i < total_dwords;
- i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
- addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ size = RSS_REG_RSS_RAM_DATA_SIZE;
+ for (i = 0; i < total_dwords; i += size, rss_addr++) {
qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
- offset += qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf +
- offset,
- dump,
- addr,
- size);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ size,
+ false);
}
}
@@ -3316,10 +3605,11 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
BIG_RAM_BLOCK_SIZE_BYTES * 8,
false, type_name, false, 0);
+ /* Read and dump Big RAM data */
if (!dump)
return offset + ram_size;
- /* Read and dump Big RAM data */
+ /* Dump Big RAM */
for (i = 0; i < total_blocks / 2; i++) {
u32 addr, len;
@@ -3331,7 +3621,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- len);
+ len,
+ false);
}
return offset;
@@ -3359,7 +3650,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
NULL,
BYTES_TO_DWORDS(MCP_REG_SCRATCH),
MCP_REG_SCRATCH_SIZE,
- 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", false, 0);
/* Dump MCP cpu_reg_file */
offset += qed_grc_dump_mem(p_hwfn,
@@ -3369,7 +3660,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
NULL,
BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
- 0, false, "MCP", false, 0);
+ false, 0, false, "MCP", false, 0);
/* Dump MCP registers */
block_enable[BLOCK_MCP] = true;
@@ -3387,11 +3678,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
- 1);
+ 1,
+ false);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
return offset;
}
@@ -3404,14 +3697,26 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
u8 phy_id;
for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
- struct phy_defs *phy_defs = &s_phy_defs[phy_id];
- int printed_chars;
-
- printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
- phy_defs->phy_name);
- if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
+ u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
+ struct phy_defs *phy_defs;
+ u8 *bytes_buf;
+
+ phy_defs = &s_phy_defs[phy_id];
+ addr_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_lo_addr;
+ addr_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_hi_addr;
+ data_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_lo_addr;
+ data_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_hi_addr;
+ bytes_buf = (u8 *)(dump_buf + offset);
+
+		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+			     phy_defs->phy_name) >= sizeof(mem_name))
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid PHY memory name\n");
+
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
@@ -3419,34 +3724,26 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
0,
PHY_DUMP_SIZE_DWORDS,
16, true, mem_name, false, 0);
- if (dump) {
- u32 addr_lo_addr = phy_defs->base_addr +
- phy_defs->tbus_addr_lo_addr;
- u32 addr_hi_addr = phy_defs->base_addr +
- phy_defs->tbus_addr_hi_addr;
- u32 data_lo_addr = phy_defs->base_addr +
- phy_defs->tbus_data_lo_addr;
- u32 data_hi_addr = phy_defs->base_addr +
- phy_defs->tbus_data_hi_addr;
- u8 *bytes_buf = (u8 *)(dump_buf + offset);
-
- for (tbus_hi_offset = 0;
- tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
- tbus_hi_offset++) {
+
+ if (!dump) {
+ offset += PHY_DUMP_SIZE_DWORDS;
+ continue;
+ }
+
+ for (tbus_hi_offset = 0;
+ tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+ tbus_hi_offset++) {
+ qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
+ for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+ tbus_lo_offset++) {
qed_wr(p_hwfn,
- p_ptt, addr_hi_addr, tbus_hi_offset);
- for (tbus_lo_offset = 0; tbus_lo_offset < 256;
- tbus_lo_offset++) {
- qed_wr(p_hwfn,
- p_ptt,
- addr_lo_addr, tbus_lo_offset);
- *(bytes_buf++) =
- (u8)qed_rd(p_hwfn, p_ptt,
- data_lo_addr);
- *(bytes_buf++) =
- (u8)qed_rd(p_hwfn, p_ptt,
- data_hi_addr);
- }
+ p_ptt, addr_lo_addr, tbus_lo_offset);
+ *(bytes_buf++) = (u8)qed_rd(p_hwfn,
+ p_ptt,
+ data_lo_addr);
+ *(bytes_buf++) = (u8)qed_rd(p_hwfn,
+ p_ptt,
+ data_hi_addr);
}
}
@@ -3460,16 +3757,17 @@ static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum block_id block_id,
u8 line_id,
- u8 cycle_en,
- u8 right_shift, u8 force_valid, u8 force_frame)
+ u8 enable_mask,
+ u8 right_shift,
+ u8 force_valid_mask, u8 force_frame_mask)
{
- struct block_defs *p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
- qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
+ qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
+ qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
+ qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
+ qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
}
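+/* Editor's note (illustrative, not part of the original patch): the static
+ * debug dump below programs each line with
+ * qed_config_dbg_line(..., line_id, 0xf, 0, 0, 0), i.e. an enable mask of
+ * 0xf with no right shift and no forced valid/frame.
+ */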
/* Dumps Static Debug data. Returns the dumped size in dwords. */
@@ -3477,10 +3775,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
- u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, block_id, line_id;
- struct block_defs *p_block_defs;
+ u32 block_id, line_id, offset = 0;
+
+ /* Skip static debug if a debug bus recording is in progress */
+ if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+ return 0;
if (dump) {
DP_VERBOSE(p_hwfn,
@@ -3488,11 +3788,11 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Disable all blocks debug output */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
- if (p_block_defs->has_dbg_bus[dev_data->chip_id])
- qed_wr(p_hwfn, p_ptt,
- p_block_defs->dbg_cycle_enable_addr, 0);
+ if (block->has_dbg_bus[dev_data->chip_id])
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
+ 0);
}
qed_bus_reset_dbg_block(p_hwfn, p_ptt);
@@ -3506,59 +3806,71 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
/* Dump all static debug lines for each relevant block */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
- p_block_defs = s_block_defs[block_id];
+ struct block_defs *block = s_block_defs[block_id];
+ struct dbg_bus_block *block_desc;
+ u32 block_dwords, addr, len;
+ u8 dbg_client_id;
- if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
+ if (!block->has_dbg_bus[dev_data->chip_id])
continue;
+ block_desc =
+ get_dbg_bus_block_desc(p_hwfn,
+ (enum block_id)block_id);
+ block_dwords = NUM_DBG_LINES(block_desc) *
+ STATIC_DEBUG_LINE_DWORDS;
+
/* Dump static section params */
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
- p_block_defs->name, 0,
- block_dwords, 32, false,
- "STATIC", false, 0);
-
- if (dump && !dev_data->block_in_reset[block_id]) {
- u8 dbg_client_id =
- p_block_defs->dbg_client_id[dev_data->chip_id];
- u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
- u32 len = STATIC_DEBUG_LINE_DWORDS;
-
- /* Enable block's client */
- qed_bus_enable_clients(p_hwfn, p_ptt,
- BIT(dbg_client_id));
-
- for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
- line_id++) {
- /* Configure debug line ID */
- qed_config_dbg_line(p_hwfn,
- p_ptt,
- (enum block_id)block_id,
- (u8)line_id,
- 0xf, 0, 0, 0);
+ block->name,
+ 0,
+ block_dwords,
+ 32, false, "STATIC", false, 0);
- /* Read debug line info */
- offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- len);
- }
+ if (!dump) {
+ offset += block_dwords;
+ continue;
+ }
- /* Disable block's client and debug output */
- qed_bus_enable_clients(p_hwfn, p_ptt, 0);
- qed_wr(p_hwfn, p_ptt,
- p_block_defs->dbg_cycle_enable_addr, 0);
- } else {
- /* All lines are invalid - dump zeros */
- if (dump)
- memset(dump_buf + offset, 0,
- DWORDS_TO_BYTES(block_dwords));
+ /* If all lines are invalid - dump zeros */
+ if (dev_data->block_in_reset[block_id]) {
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(block_dwords));
offset += block_dwords;
+ continue;
+ }
+
+ /* Enable block's client */
+ dbg_client_id = block->dbg_client_id[dev_data->chip_id];
+ qed_bus_enable_clients(p_hwfn,
+ p_ptt,
+ BIT(dbg_client_id));
+
+ addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+ len = STATIC_DEBUG_LINE_DWORDS;
+ for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
+ line_id++) {
+ /* Configure debug line ID */
+ qed_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id, 0xf, 0, 0, 0);
+
+ /* Read debug line info */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ true);
}
+
+ /* Disable block's client and debug output */
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
}
if (dump) {
@@ -3584,8 +3896,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
*num_dumped_dwords = 0;
- /* Find port mode */
if (dump) {
+ /* Find port mode */
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
case 0:
port_mode = 1;
@@ -3597,11 +3909,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
port_mode = 4;
break;
}
- }
- /* Update reset state */
- if (dump)
+ /* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ }
/* Dump global params */
offset += qed_dump_common_global_params(p_hwfn,
@@ -3635,7 +3946,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
}
/* Disable all parities using MFW command */
- if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
+ if (dump &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
if (!parities_masked) {
DP_NOTICE(p_hwfn,
@@ -3661,9 +3973,9 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
/* Dump all regs */
if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
- /* Dump all blocks except MCP */
bool block_enable[MAX_BLOCK_ID];
+ /* Dump all blocks except MCP */
for (i = 0; i < MAX_BLOCK_ID; i++)
block_enable[i] = true;
block_enable[BLOCK_MCP] = false;
@@ -3732,7 +4044,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset, dump);
/* Dump last section */
- offset += qed_dump_last_section(dump_buf, offset, dump);
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
if (dump) {
/* Unstall storms */
if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
@@ -3763,19 +4076,20 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
const struct dbg_idle_chk_rule *rule,
u16 fail_entry_id, u32 *cond_reg_values)
{
- const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *)
- s_dbg_arrays
- [BIN_BUF_DBG_IDLE_CHK_REGS].
- ptr)[rule->reg_offset];
- const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- struct dbg_idle_chk_result_hdr *hdr =
- (struct dbg_idle_chk_result_hdr *)dump_buf;
- const struct dbg_idle_chk_info_reg *info_regs =
- &regs[rule->num_cond_regs].info_reg;
- u32 next_reg_offset = 0, i, offset = 0;
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_info_reg *info_regs;
+ u32 i, next_reg_offset = 0, offset = 0;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const union dbg_idle_chk_reg *regs;
u8 reg_id;
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+ cond_regs = &regs[0].cond_reg;
+ info_regs = &regs[rule->num_cond_regs].info_reg;
+
/* Dump rule data */
if (dump) {
memset(hdr, 0, sizeof(*hdr));
@@ -3790,33 +4104,31 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
/* Dump condition register values */
for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
- /* Write register header */
- if (dump) {
- struct dbg_idle_chk_result_reg_hdr *reg_hdr =
- (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
- + offset);
- offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
- memset(reg_hdr, 0,
- sizeof(struct dbg_idle_chk_result_reg_hdr));
- reg_hdr->start_entry = reg->start_entry;
- reg_hdr->size = reg->entry_size;
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
- reg->num_entries > 1 || reg->start_entry > 0
- ? 1 : 0);
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+ reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
- /* Write register values */
- for (i = 0; i < reg_hdr->size;
- i++, next_reg_offset++, offset++)
- dump_buf[offset] =
- cond_reg_values[next_reg_offset];
- } else {
+ /* Write register header */
+ if (!dump) {
offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
reg->entry_size;
+ continue;
}
+
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->start_entry = reg->start_entry;
+ reg_hdr->size = reg->entry_size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+ reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
+ dump_buf[offset] = cond_reg_values[next_reg_offset];
}
/* Dump info register values */
@@ -3824,12 +4136,12 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
u32 block_id;
+ /* Check if register's block is in reset */
if (!dump) {
offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
continue;
}
- /* Check if register's block is in reset */
block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
if (block_id >= MAX_BLOCK_ID) {
DP_NOTICE(p_hwfn, "Invalid block_id\n");
@@ -3837,47 +4149,50 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
}
if (!dev_data->block_in_reset[block_id]) {
- bool eval_mode = GET_FIELD(reg->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- bool mode_match = true;
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+ bool wide_bus, eval_mode, mode_match = true;
+ u16 modes_buf_offset;
+ u32 addr;
+
+ reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
/* Check mode */
+ eval_mode = GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
- GET_FIELD(reg->mode.data,
- DBG_MODE_HDR_MODES_BUF_OFFSET);
+ modes_buf_offset =
+ GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
mode_match =
qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
- if (mode_match) {
- u32 addr =
- GET_FIELD(reg->data,
- DBG_IDLE_CHK_INFO_REG_ADDRESS);
-
- /* Write register header */
- struct dbg_idle_chk_result_reg_hdr *reg_hdr =
- (struct dbg_idle_chk_result_reg_hdr *)
- (dump_buf + offset);
-
- offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
- hdr->num_dumped_info_regs++;
- memset(reg_hdr, 0, sizeof(*reg_hdr));
- reg_hdr->size = reg->size;
- SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
- rule->num_cond_regs + reg_id);
-
- /* Write register values */
- offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- addr,
- reg->size);
- }
+ if (!mode_match)
+ continue;
+
+ addr = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS);
+ wide_bus = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
+
+ /* Write register header */
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ hdr->num_dumped_info_regs++;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->size = reg->size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
+
+ /* Write register values */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ reg->size, wide_bus);
}
}
@@ -3898,6 +4213,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 reg_id;
*num_failing_rules = 0;
+
for (i = 0; i < num_input_rules; i++) {
const struct dbg_idle_chk_cond_reg *cond_regs;
const struct dbg_idle_chk_rule *rule;
@@ -3920,8 +4236,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
*/
for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
reg_id++) {
- u32 block_id = GET_FIELD(cond_regs[reg_id].data,
- DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+ u32 block_id =
+ GET_FIELD(cond_regs[reg_id].data,
+ DBG_IDLE_CHK_COND_REG_BLOCK_ID);
if (block_id >= MAX_BLOCK_ID) {
DP_NOTICE(p_hwfn, "Invalid block_id\n");
@@ -3936,48 +4253,47 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
- if (!dump) {
- u32 entry_dump_size =
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- false,
- rule->rule_id,
- rule,
- 0,
- NULL);
-
- offset += num_reg_entries * entry_dump_size;
- (*num_failing_rules) += num_reg_entries;
- continue;
- }
-
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
- /* Read current entry of all condition registers */
u32 next_reg_offset = 0;
+ if (!dump) {
+ offset += qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ entry_id,
+ NULL);
+ (*num_failing_rules)++;
+ break;
+ }
+
+ /* Read current entry of all condition registers */
for (reg_id = 0; reg_id < rule->num_cond_regs;
reg_id++) {
const struct dbg_idle_chk_cond_reg *reg =
- &cond_regs[reg_id];
+ &cond_regs[reg_id];
+ u32 padded_entry_size, addr;
+ bool wide_bus;
- /* Find GRC address (if it's a memory,the
+ /* Find GRC address (if it's a memory, the
* address of the specific entry is calculated).
*/
- u32 addr =
+ addr = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS);
+ wide_bus =
GET_FIELD(reg->data,
- DBG_IDLE_CHK_COND_REG_ADDRESS);
-
+ DBG_IDLE_CHK_COND_REG_WIDE_BUS);
if (reg->num_entries > 1 ||
reg->start_entry > 0) {
- u32 padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two(reg->entry_size) :
- 1;
-
+					padded_entry_size =
+					    reg->entry_size > 1 ?
+					    roundup_pow_of_two(reg->entry_size) : 1;
addr += (reg->start_entry + entry_id) *
padded_entry_size;
}
@@ -3991,28 +4307,27 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
next_reg_offset +=
- qed_grc_dump_addr_range(p_hwfn,
- p_ptt,
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
cond_reg_values +
next_reg_offset,
dump, addr,
- reg->entry_size);
+ reg->entry_size,
+ wide_bus);
}
- /* Call rule's condition function - a return value of
- * true indicates failure.
+ /* Call rule condition function.
+			 * If it returns true, it's a failure.
*/
- if ((*cond_arr[rule->cond_id])(cond_reg_values,
- imm_values)) {
- offset +=
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- rule->rule_id,
- rule,
- entry_id,
- cond_reg_values);
+			if ((*cond_arr[rule->cond_id])(cond_reg_values,
+						       imm_values)) {
+ offset += qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
(*num_failing_rules)++;
break;
}
@@ -4028,8 +4343,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
- u32 offset = 0, input_offset = 0, num_failing_rules = 0;
- u32 num_failing_rules_offset;
+ u32 num_failing_rules_offset, offset = 0, input_offset = 0;
+ u32 num_failing_rules = 0;
/* Dump global params */
offset += qed_dump_common_global_params(p_hwfn,
@@ -4042,29 +4357,29 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
num_failing_rules_offset = offset;
offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
const struct dbg_idle_chk_cond_hdr *cond_hdr =
(const struct dbg_idle_chk_cond_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
[input_offset++];
- bool eval_mode = GET_FIELD(cond_hdr->mode.data,
- DBG_MODE_HDR_EVAL_MODE) > 0;
- bool mode_match = true;
+ bool eval_mode, mode_match = true;
+ u32 curr_failing_rules;
+ u16 modes_buf_offset;
/* Check mode */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
if (eval_mode) {
- u16 modes_buf_offset =
+ modes_buf_offset =
GET_FIELD(cond_hdr->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
-
mode_match = qed_is_mode_match(p_hwfn,
&modes_buf_offset);
}
if (mode_match) {
- u32 curr_failing_rules;
-
offset +=
qed_idle_chk_dump_rule_entries(p_hwfn,
p_ptt,
@@ -4086,10 +4401,13 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
qed_dump_num_param(dump_buf + num_failing_rules_offset,
dump, "num_rules", num_failing_rules);
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
return offset;
}
-/* Finds the meta data image in NVRAM. */
+/* Finds the meta data image in NVRAM */
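+/* Editor's note (illustrative, not part of the original patch): on success,
+ * the MCP file attribute supplies both output values (nvram_offset_bytes
+ * from nvm_start_addr, nvram_size_bytes from len); a size that is not a
+ * multiple of 4 bytes is rejected with DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE.
+ */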
static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 image_type,
@@ -4098,16 +4416,16 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
{
u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
struct mcp_file_att file_att;
+ int nvm_result;
/* Call NVRAM get file command */
- int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
- p_ptt,
- DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type,
- &ret_mcp_resp,
- &ret_mcp_param,
- &ret_txn_size,
- (u32 *)&file_att);
+ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size, (u32 *)&file_att);
/* Check response */
if (nvm_result ||
@@ -4117,6 +4435,7 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
/* Update return values */
*nvram_offset_bytes = file_att.nvm_start_addr;
*nvram_size_bytes = file_att.len;
+
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
@@ -4125,22 +4444,25 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
/* Check alignment */
if (*nvram_size_bytes & 0x3)
return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+
return DBG_STATUS_OK;
}
+/* Reads data from NVRAM */
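+/* Editor's note (illustrative, not part of the original patch): the image is
+ * read in MCP-sized chunks; each iteration advances read_offset and
+ * decrements bytes_left until the whole buffer is filled, and any command
+ * failure aborts with DBG_STATUS_NVRAM_READ_FAILED.
+ */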
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 nvram_offset_bytes,
u32 nvram_size_bytes, u32 *ret_buf)
{
- u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
- u32 bytes_to_copy, read_offset = 0;
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
s32 bytes_left = nvram_size_bytes;
+ u32 read_offset = 0;
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"nvram_read: reading image of size %d bytes from NVRAM\n",
nvram_size_bytes);
+
do {
bytes_to_copy =
(bytes_left >
@@ -4155,8 +4477,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
DRV_MB_PARAM_NVM_LEN_SHIFT),
&ret_mcp_resp, &ret_mcp_param,
&ret_read_size,
- (u32 *)((u8 *)ret_buf +
- read_offset)) != 0)
+ (u32 *)((u8 *)ret_buf + read_offset)))
return DBG_STATUS_NVRAM_READ_FAILED;
/* Check response */
@@ -4172,24 +4493,20 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
}
/* Get info on the MCP Trace data in the scratchpad:
- * - trace_data_grc_addr - the GRC address of the trace data
- * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
- * the header)
+ * - trace_data_grc_addr (OUT): trace data GRC address in bytes
+ * - trace_data_size (OUT): trace data size in bytes (without the header)
*/
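+/* Editor's note (illustrative, not part of the original patch):
+ * spad_trace_offsize packs both the offset and the size of the scratchpad
+ * trace section; SECTION_OFFSET() below yields the byte offset that is added
+ * to MCP_REG_SCRATCH to form trace_data_grc_addr.
+ */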
static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *trace_data_grc_addr,
- u32 *trace_data_size_bytes)
+ u32 *trace_data_size)
{
- /* Read MCP trace section offsize structure from MCP scratchpad */
- u32 spad_trace_offsize = qed_rd(p_hwfn,
- p_ptt,
- MCP_SPAD_TRACE_OFFSIZE_ADDR);
- u32 signature;
+ u32 spad_trace_offsize, signature;
- /* Extract MCP trace section GRC address from offsize structure (within
- * scratchpad).
- */
+ /* Read trace section offsize structure from MCP scratchpad */
+ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Extract trace section address from offsize (in scratchpad) */
*trace_data_grc_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
@@ -4197,42 +4514,41 @@ static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
signature = qed_rd(p_hwfn, p_ptt,
*trace_data_grc_addr +
offsetof(struct mcp_trace, signature));
+
if (signature != MFW_TRACE_SIGNATURE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Read trace size from MCP trace section */
- *trace_data_size_bytes = qed_rd(p_hwfn,
- p_ptt,
- *trace_data_grc_addr +
- offsetof(struct mcp_trace, size));
+ *trace_data_size = qed_rd(p_hwfn,
+ p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, size));
+
return DBG_STATUS_OK;
}
-/* Reads MCP trace meta data image from NVRAM.
- * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
- * file)
- * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
- * Trace meta data starts (invalid when loaded from file)
- * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
+/* Reads MCP trace meta data image from NVRAM
+ * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
+ * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
+ * loaded from file).
+ * - trace_meta_size (OUT): size in bytes of the trace meta data.
*/
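+/* Editor's note (illustrative, not part of the original patch):
+ * running_bundle_id may only be 0 or 1; it selects between
+ * NVM_TYPE_MFW_TRACE1 and NVM_TYPE_MFW_TRACE2 below, and any other value
+ * fails with DBG_STATUS_INVALID_NVRAM_BUNDLE.
+ */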
static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 trace_data_size_bytes,
u32 *running_bundle_id,
- u32 *trace_meta_offset_bytes,
- u32 *trace_meta_size_bytes)
+ u32 *trace_meta_offset,
+ u32 *trace_meta_size)
{
+ u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
+
/* Read MCP trace section offsize structure from MCP scratchpad */
- u32 spad_trace_offsize = qed_rd(p_hwfn,
- p_ptt,
- MCP_SPAD_TRACE_OFFSIZE_ADDR);
+ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
/* Find running bundle ID */
- u32 running_mfw_addr =
+ running_mfw_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
- u32 nvram_image_type;
-
*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
if (*running_bundle_id > 1)
return DBG_STATUS_INVALID_NVRAM_BUNDLE;
@@ -4241,40 +4557,33 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
nvram_image_type =
(*running_bundle_id ==
DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
-
return qed_find_nvram_image(p_hwfn,
p_ptt,
nvram_image_type,
- trace_meta_offset_bytes,
- trace_meta_size_bytes);
+ trace_meta_offset, trace_meta_size);
}
-/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
- * buffer.
- */
+/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
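+/* Editor's sketch of the meta image layout, inferred from the parsing below
+ * (not part of the original patch):
+ * [u32 signature][u8 modules_num][modules_num x (u8 len, len bytes)]
+ * [u32 signature], where both signatures must equal NVM_MAGIC_VALUE.
+ */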
static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 nvram_offset_in_bytes,
u32 size_in_bytes, u32 *buf)
{
- u8 *byte_buf = (u8 *)buf;
- u8 modules_num, i;
+ u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
+ enum dbg_status status;
u32 signature;
/* Read meta data from NVRAM */
- enum dbg_status status = qed_nvram_read(p_hwfn,
- p_ptt,
- nvram_offset_in_bytes,
- size_in_bytes,
- buf);
-
+ status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ nvram_offset_in_bytes, size_in_bytes, buf);
if (status != DBG_STATUS_OK)
return status;
/* Extract and check first signature */
signature = qed_read_unaligned_dword(byte_buf);
- byte_buf += sizeof(u32);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ byte_buf += sizeof(signature);
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Extract number of modules */
@@ -4282,16 +4591,16 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
/* Skip all modules */
for (i = 0; i < modules_num; i++) {
- u8 module_len = *(byte_buf++);
-
+ module_len = *(byte_buf++);
byte_buf += module_len;
}
/* Extract and check second signature */
signature = qed_read_unaligned_dword(byte_buf);
- byte_buf += sizeof(u32);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ byte_buf += sizeof(signature);
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
return DBG_STATUS_OK;
}
@@ -4308,10 +4617,10 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
bool mcp_access;
int halted = 0;
- mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
-
*num_dumped_dwords = 0;
+ mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
/* Get trace data info */
status = qed_mcp_trace_get_data_info(p_hwfn,
p_ptt,
@@ -4328,7 +4637,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "dump-type", "mcp-trace");
/* Halt MCP while reading from scratchpad so the read data will be
- * consistent if halt fails, MCP trace is taken anyway, with a small
+	 * consistent. If halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
if (dump && mcp_access) {
@@ -4339,8 +4648,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Find trace data size */
trace_data_size_dwords =
- DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
- BYTES_IN_DWORD);
+ DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+ BYTES_IN_DWORD);
/* Dump trace data section header and param */
offset += qed_dump_section_hdr(dump_buf + offset,
@@ -4354,17 +4663,17 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
BYTES_TO_DWORDS(trace_data_grc_addr),
- trace_data_size_dwords);
+ trace_data_size_dwords, false);
/* Resume MCP (only if halt succeeded) */
- if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
/* Dump trace meta section header */
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "mcp_trace_meta", 1);
- /* Read trace meta info */
+ /* Read trace meta info (trace_meta_size_bytes is dword-aligned) */
if (mcp_access) {
status = qed_mcp_trace_get_meta_info(p_hwfn,
p_ptt,
@@ -4391,6 +4700,9 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
if (status == DBG_STATUS_OK)
offset += trace_meta_size_dwords;
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
*num_dumped_dwords = offset;
/* If no mcp access, indicate that the dump doesn't contain the meta
@@ -4405,7 +4717,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 offset = 0, dwords_read, size_param_offset;
+ u32 dwords_read, size_param_offset, offset = 0;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4417,8 +4729,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "reg-fifo");
- /* Dump fifo data section header and param. The size param is 0 for now,
- * and is overwritten after reading the FIFO.
+ /* Dump fifo data section header and param. The size param is 0 for
+ * now, and is overwritten after reading the FIFO.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "reg_fifo_data", 1);
@@ -4430,8 +4742,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
* test how much data is available, except for reading it.
*/
offset += REG_FIFO_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
fifo_has_data = qed_rd(p_hwfn, p_ptt,
@@ -4456,8 +4767,12 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
dwords_read);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
@@ -4467,7 +4782,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump, u32 *num_dumped_dwords)
{
- u32 offset = 0, dwords_read, size_param_offset;
+ u32 dwords_read, size_param_offset, offset = 0;
bool fifo_has_data;
*num_dumped_dwords = 0;
@@ -4479,8 +4794,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "igu-fifo");
- /* Dump fifo data section header and param. The size param is 0 for now,
- * and is overwritten after reading the FIFO.
+ /* Dump fifo data section header and param. The size param is 0 for
+ * now, and is overwritten after reading the FIFO.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "igu_fifo_data", 1);
@@ -4492,8 +4807,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
* test how much data is available, except for reading it.
*/
offset += IGU_FIFO_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
fifo_has_data = qed_rd(p_hwfn, p_ptt,
@@ -4519,8 +4833,12 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
dwords_read);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
@@ -4531,7 +4849,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
bool dump,
u32 *num_dumped_dwords)
{
- u32 offset = 0, size_param_offset, override_window_dwords;
+ u32 size_param_offset, override_window_dwords, offset = 0;
*num_dumped_dwords = 0;
@@ -4542,8 +4860,8 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "protection-override");
- /* Dump data section header and param. The size param is 0 for now, and
- * is overwritten after reading the data.
+ /* Dump data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the data.
*/
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "protection_override_data", 1);
@@ -4552,8 +4870,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
if (!dump) {
offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
- *num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ goto out;
}
/* Add override window info to buffer */
@@ -4569,8 +4886,12 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
offset += override_window_dwords;
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
override_window_dwords);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
*num_dumped_dwords = offset;
+
return DBG_STATUS_OK;
}
@@ -4593,11 +4914,14 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset, dump, 1);
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "fw-asserts");
+
+ /* Find Storm dump size */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
u32 last_list_idx, addr;
- if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
+ if (dev_data->block_in_reset[storm->block_id])
continue;
/* Read FW info for the current Storm */
@@ -4606,26 +4930,26 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
asserts = &fw_info.fw_asserts_section;
/* Dump FW Asserts section header and params */
- storm_letter_str[0] = s_storm_defs[storm_id].letter;
- offset += qed_dump_section_hdr(dump_buf + offset, dump,
- "fw_asserts", 2);
- offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
- storm_letter_str);
- offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ storm_letter_str[0] = storm->letter;
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "fw_asserts", 2);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "storm", storm_letter_str);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "size",
asserts->list_element_dword_size);
+ /* Read and dump FW Asserts data */
if (!dump) {
offset += asserts->list_element_dword_size;
continue;
}
- /* Read and dump FW Asserts data */
- fw_asserts_section_addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
+ fw_asserts_section_addr = storm->sem_fast_mem_addr +
SEM_FAST_REG_INT_RAM +
RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
- next_list_idx_addr =
- fw_asserts_section_addr +
+ next_list_idx_addr = fw_asserts_section_addr +
DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
last_list_idx = (next_list_idx > 0
@@ -4638,11 +4962,13 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
qed_grc_dump_addr_range(p_hwfn, p_ptt,
dump_buf + offset,
dump, addr,
- asserts->list_element_dword_size);
+ asserts->list_element_dword_size,
+ false);
}
/* Dump last section */
- offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+ offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+
return offset;
}
@@ -4650,10 +4976,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
{
- /* Convert binary data to debug arrays */
struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
+	/* Convert binary data to debug arrays */
for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
@@ -4682,14 +5008,17 @@ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4702,12 +5031,14 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4724,25 +5055,31 @@ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
{
- enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct idle_chk_data *idle_chk;
+ enum dbg_status status;
+ idle_chk = &dev_data->idle_chk;
*buf_size = 0;
+
+ status = qed_dbg_dev_init(p_hwfn, p_ptt);
if (status != DBG_STATUS_OK)
return status;
+
if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
- if (!dev_data->idle_chk.buf_size_set) {
- dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
- p_ptt,
- NULL, false);
- dev_data->idle_chk.buf_size_set = true;
+
+ if (!idle_chk->buf_size_set) {
+ idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
+ p_ptt, NULL, false);
+ idle_chk->buf_size_set = true;
}
- *buf_size = dev_data->idle_chk.buf_size;
+ *buf_size = idle_chk->buf_size;
+
return DBG_STATUS_OK;
}
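
qed_dbg_idle_chk_get_dump_buf_size() above runs the expensive counting pass only once: the first call executes qed_idle_chk_dump() with a NULL buffer and caches the result in idle_chk->buf_size. A sketch of that lazy-caching idiom in isolation (illustrative names, not the driver's types):

/* Lazy size caching, illustrative names only: the first query performs
 * a dry run (NULL buffer); later queries return the cached size.
 */
struct size_cache {
	unsigned int buf_size;
	int buf_size_set;
};

static unsigned int cached_buf_size(struct size_cache *cache,
				    unsigned int (*count)(unsigned int *))
{
	if (!cache->buf_size_set) {
		cache->buf_size = count(NULL);	/* dry run, nothing copied */
		cache->buf_size_set = 1;
	}

	return cache->buf_size;
}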
@@ -4755,12 +5092,14 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4783,8 +5122,10 @@ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4797,13 +5138,12 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- /* validate buffer size */
status =
- qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
- if (status != DBG_STATUS_OK &&
- status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK && status !=
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return status;
if (buf_size_in_dwords < needed_buf_size_in_dwords)
@@ -4829,8 +5169,10 @@ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4843,12 +5185,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4871,8 +5215,10 @@ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
}
@@ -4885,12 +5231,14 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4913,8 +5261,10 @@ qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
+
return qed_protection_override_dump(p_hwfn,
p_ptt, NULL, false, buf_size);
}
@@ -4925,15 +5275,18 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
+ u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status =
+ qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ p_size);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4958,12 +5311,15 @@ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
*buf_size = 0;
+
if (status != DBG_STATUS_OK)
return status;
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+
return DBG_STATUS_OK;
}
@@ -4973,19 +5329,26 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
+ u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
- &needed_buf_size_in_dwords);
-
*num_dumped_dwords = 0;
+
+ status =
+ qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ p_size);
if (status != DBG_STATUS_OK)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return DBG_STATUS_OK;
}
@@ -5005,9 +5368,14 @@ struct mcp_trace_format {
#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+
char *format_str;
};
+/* Meta data structure, generated by a perl script during MFW build.
+ * Therefore, the structs mcp_trace_meta and mcp_trace_format are
+ * duplicated in the perl script.
+ */
struct mcp_trace_meta {
u32 modules_num;
char **modules;
@@ -5015,7 +5383,7 @@ struct mcp_trace_meta {
struct mcp_trace_format *formats;
};
-/* Reg fifo element */
+/* REG fifo element */
struct reg_fifo_element {
u64 data;
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
@@ -5140,12 +5508,15 @@ struct igu_fifo_addr_data {
/******************************** Constants **********************************/
#define MAX_MSG_LEN 1024
+
#define MCP_TRACE_MAX_MODULE_LEN 8
#define MCP_TRACE_FORMAT_MAX_PARAMS 3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+
#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
+
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
/********************************* Macros ************************************/
@@ -5154,59 +5525,178 @@ struct igu_fifo_addr_data {
/***************************** Constant Arrays *******************************/
+struct user_dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
+/* Debug arrays */
+static struct user_dbg_array
+s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+
/* Status string array */
static const char * const s_status_str[] = {
+ /* DBG_STATUS_OK */
"Operation completed successfully",
+
+ /* DBG_STATUS_APP_VERSION_NOT_SET */
"Debug application version wasn't set",
+
+ /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
"Unsupported debug application version",
+
+ /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
"The debug block wasn't reset since the last recording",
+
+ /* DBG_STATUS_INVALID_ARGS */
"Invalid arguments",
+
+ /* DBG_STATUS_OUTPUT_ALREADY_SET */
"The debug output was already set",
+
+ /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
"Invalid PCI buffer size",
+
+ /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
"PCI buffer allocation failed",
+
+ /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
"A PCI buffer wasn't allocated",
+
+ /* DBG_STATUS_TOO_MANY_INPUTS */
"Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
- "GRC/Timestamp input overlap in cycle dword 0",
+
+ /* DBG_STATUS_INPUT_OVERLAP */
+ "Overlapping debug bus inputs",
+
+ /* DBG_STATUS_HW_ONLY_RECORDING */
"Cannot record Storm data since the entire recording cycle is used by HW",
+
+ /* DBG_STATUS_STORM_ALREADY_ENABLED */
"The Storm was already enabled",
+
+ /* DBG_STATUS_STORM_NOT_ENABLED */
"The specified Storm wasn't enabled",
+
+ /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
"The block was already enabled",
+
+ /* DBG_STATUS_BLOCK_NOT_ENABLED */
"The specified block wasn't enabled",
+
+ /* DBG_STATUS_NO_INPUT_ENABLED */
"No input was enabled for recording",
+
+ /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
"Filters and triggers are not allowed when recording in 64b units",
+
+ /* DBG_STATUS_FILTER_ALREADY_ENABLED */
"The filter was already enabled",
+
+ /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
"The trigger was already enabled",
+
+ /* DBG_STATUS_TRIGGER_NOT_ENABLED */
"The trigger wasn't enabled",
+
+ /* DBG_STATUS_CANT_ADD_CONSTRAINT */
"A constraint can be added only after a filter was enabled or a trigger state was added",
+
+ /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
"Cannot add more than 3 trigger states",
+
+ /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
"Cannot add more than 4 constraints per filter or trigger state",
+
+ /* DBG_STATUS_RECORDING_NOT_STARTED */
"The recording wasn't started",
+
+ /* DBG_STATUS_DATA_DIDNT_TRIGGER */
"A trigger was configured, but it didn't trigger",
+
+ /* DBG_STATUS_NO_DATA_RECORDED */
"No data was recorded",
+
+ /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
"Dump buffer is too small",
+
+ /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
"Dumped data is not aligned to chunks",
+
+ /* DBG_STATUS_UNKNOWN_CHIP */
"Unknown chip",
+
+ /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
"Failed allocating virtual memory",
+
+ /* DBG_STATUS_BLOCK_IN_RESET */
"The input block is in reset",
+
+ /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
"Invalid MCP trace signature found in NVRAM",
+
+ /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
"Invalid bundle ID found in NVRAM",
+
+ /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
"Failed getting NVRAM image",
+
+ /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
"NVRAM image is not dword-aligned",
+
+ /* DBG_STATUS_NVRAM_READ_FAILED */
"Failed reading from NVRAM",
+
+ /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
"Idle check parsing failed",
+
+ /* DBG_STATUS_MCP_TRACE_BAD_DATA */
"MCP Trace data is corrupt",
- "Dump doesn't contain meta data - it must be provided in an image file",
+
+ /* DBG_STATUS_MCP_TRACE_NO_META */
+ "Dump doesn't contain meta data - it must be provided in image file",
+
+ /* DBG_STATUS_MCP_COULD_NOT_HALT */
"Failed to halt MCP",
+
+ /* DBG_STATUS_MCP_COULD_NOT_RESUME */
"Failed to resume MCP after halt",
+
+ /* DBG_STATUS_DMAE_FAILED */
"DMAE transaction failed",
+
+ /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
"Failed to empty SEMI sync FIFO",
+
+ /* DBG_STATUS_IGU_FIFO_BAD_DATA */
"IGU FIFO data is corrupt",
+
+ /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
"MCP failed to mask parities",
+
+ /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
"FW Asserts parsing failed",
+
+ /* DBG_STATUS_REG_FIFO_BAD_DATA */
"GRC FIFO data is corrupt",
+
+ /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
"Protection Override data is corrupt",
+
+ /* DBG_STATUS_DBG_ARRAY_NOT_SET */
"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
- "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
+
+ /* DBG_STATUS_FILTER_BUG */
+ "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
+
+ /* DBG_STATUS_NON_MATCHING_LINES */
+ "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
+
+ /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
+ "The selected trigger dword offset wasn't enabled in the recorded HW block",
+
+ /* DBG_STATUS_DBG_BUS_IN_USE */
+ "The debug bus is in use"
};
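
The array above relies on the /* DBG_STATUS_* */ comments to keep each message aligned with its enum value. A hedged alternative, not what the driver does, is C99 designated initializers, which make the mapping explicit and reorder-proof; the sketch below uses its own small enum rather than the driver's:

/* Sketch of the designated-initializer alternative; the enum here is
 * illustrative, not the driver's DBG_STATUS_* enum.
 */
enum example_status {
	EX_STATUS_OK,
	EX_STATUS_BUF_TOO_SMALL,
	EX_STATUS_BUS_IN_USE,
	EX_STATUS_MAX
};

static const char * const example_status_str[EX_STATUS_MAX] = {
	[EX_STATUS_OK]		  = "Operation completed successfully",
	[EX_STATUS_BUF_TOO_SMALL] = "Dump buffer is too small",
	[EX_STATUS_BUS_IN_USE]	  = "The debug bus is in use",
};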
/* Idle check severity names array */
@@ -5223,12 +5713,13 @@ static const char * const s_mcp_trace_level_str[] = {
"DEBUG"
};
-/* Parsing strings */
+/* Access type names array */
static const char * const s_access_strs[] = {
"read",
"write"
};
+/* Privilege type names array */
static const char * const s_privilege_strs[] = {
"VF",
"PDA",
@@ -5236,6 +5727,7 @@ static const char * const s_privilege_strs[] = {
"UA"
};
+/* Protection type names array */
static const char * const s_protection_strs[] = {
"(default)",
"(default)",
@@ -5247,6 +5739,7 @@ static const char * const s_protection_strs[] = {
"override UA"
};
+/* Master type names array */
static const char * const s_master_strs[] = {
"???",
"pxp",
@@ -5266,6 +5759,7 @@ static const char * const s_master_strs[] = {
"???"
};
+/* REG FIFO error messages array */
static const char * const s_reg_fifo_error_strs[] = {
"grc timeout",
"address doesn't belong to any block",
@@ -5274,6 +5768,7 @@ static const char * const s_reg_fifo_error_strs[] = {
"path isolation error"
};
+/* IGU FIFO sources array */
static const char * const s_igu_fifo_source_strs[] = {
"TSTORM",
"MSTORM",
@@ -5288,6 +5783,7 @@ static const char * const s_igu_fifo_source_strs[] = {
"GRC",
};
+/* IGU FIFO error messages */
static const char * const s_igu_fifo_error_strs[] = {
"no error",
"length error",
@@ -5308,13 +5804,18 @@ static const char * const s_igu_fifo_error_strs[] = {
/* IGU FIFO address data */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
- {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
- {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
- {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
+ {0x0, 0x101, "MSI-X Memory", NULL,
+ IGU_ADDR_TYPE_MSIX_MEM},
+ {0x102, 0x1ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x200, 0x200, "Write PBA[0:63]", NULL,
+ IGU_ADDR_TYPE_WRITE_PBA},
{0x201, 0x201, "Write PBA[64:127]", "reserved",
IGU_ADDR_TYPE_WRITE_PBA},
- {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
- {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x202, 0x202, "Write PBA[128]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x203, 0x3ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
IGU_ADDR_TYPE_WRITE_INT_ACK},
{0x5f0, 0x5f0, "Attention bits update", NULL,
@@ -5331,8 +5832,10 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
IGU_ADDR_TYPE_READ_INT},
{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
IGU_ADDR_TYPE_READ_INT},
- {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
- {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+ {0x5f7, 0x5ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x600, 0x7ff, "Producer update", NULL,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};
/******************************** Variables **********************************/
@@ -5340,28 +5843,12 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
/* MCP Trace meta data - used in case the dump doesn't contain the meta data
* (e.g. due to no NVRAM access).
*/
-static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
+static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 };
/* Temporary buffer, used for print size calculations */
static char s_temp_buf[MAX_MSG_LEN];
-/***************************** Public Functions *******************************/
-
-enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
-{
- /* Convert binary data to debug arrays */
- struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
- u8 buf_id;
-
- for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
- s_dbg_arrays[buf_id].ptr =
- (u32 *)(bin_ptr + buf_array[buf_id].offset);
- s_dbg_arrays[buf_id].size_in_dwords =
- BYTES_TO_DWORDS(buf_array[buf_id].length);
- }
-
- return DBG_STATUS_OK;
-}
+/**************************** Private Functions ******************************/
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
@@ -5381,10 +5868,8 @@ static u32 qed_read_from_cyclic_buf(void *buf,
u32 *offset,
u32 buf_size, u8 num_bytes_to_read)
{
- u8 *bytes_buf = (u8 *)buf;
- u8 *val_ptr;
+ u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
u32 val = 0;
- u8 i;
val_ptr = (u8 *)&val;
@@ -5412,6 +5897,7 @@ static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
*offset += 4;
+
return dword_val;
}
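
qed_read_from_cyclic_buf() above advances its offset with qed_cyclic_add() so reads wrap around the MCP trace ring, and qed_cyclic_sub() measures the distance between producer and consumer. A minimal sketch of the wrap arithmetic, assuming both operands are already smaller than the ring size:

/* Cyclic offset arithmetic, a minimal sketch assuming a, b < size. */
static unsigned int cyclic_add(unsigned int a, unsigned int b,
			       unsigned int size)
{
	return (a + b) % size;
}

/* Distance from b forward to a on the ring, same assumption. */
static unsigned int cyclic_sub(unsigned int a, unsigned int b,
			       unsigned int size)
{
	return (a - b + size) % size;
}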
@@ -5445,7 +5931,7 @@ static u32 qed_read_param(u32 *dump_buf,
const char **param_str_val, u32 *param_num_val)
{
char *char_buf = (char *)dump_buf;
- u32 offset = 0; /* In bytes */
+ size_t offset = 0;
/* Extract param name */
*param_name = char_buf;
@@ -5493,37 +5979,31 @@ static u32 qed_print_section_params(u32 *dump_buf,
u32 i, dump_offset = 0, results_offset = 0;
for (i = 0; i < num_section_params; i++) {
- const char *param_name;
- const char *param_str_val;
+ const char *param_name, *param_str_val;
u32 param_num_val = 0;
dump_offset += qed_read_param(dump_buf + dump_offset,
&param_name,
&param_str_val, &param_num_val);
+
if (param_str_val)
- /* String param */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"%s: %s\n", param_name, param_str_val);
else if (strcmp(param_name, "fw-timestamp"))
- /* Numeric param */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"%s: %d\n", param_name, param_num_val);
}
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
+ "\n");
+
*num_chars_printed = results_offset;
- return dump_offset;
-}
-const char *qed_dbg_get_status_str(enum dbg_status status)
-{
- return (status <
- MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+ return dump_offset;
}
/* Parses the idle check rules and returns the number of characters printed.
@@ -5537,7 +6017,10 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *num_errors, u32 *num_warnings)
{
- u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
+ /* Offset in results_buf in bytes */
+ u32 results_offset = 0;
+
+ u32 rule_idx;
u16 i, j;
*num_errors = 0;
@@ -5548,16 +6031,15 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
rule_idx++) {
const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
struct dbg_idle_chk_result_hdr *hdr;
- const char *parsing_str;
+ const char *parsing_str, *lsi_msg;
u32 parsing_str_offset;
- const char *lsi_msg;
- u8 curr_reg_id = 0;
bool has_fw_msg;
+ u8 curr_reg_id;
hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
rule_parsing_data =
(const struct dbg_idle_chk_rule_parsing_data *)
- &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
+ &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
ptr[hdr->rule_id];
parsing_str_offset =
GET_FIELD(rule_parsing_data->data,
@@ -5565,16 +6047,18 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
has_fw_msg =
GET_FIELD(rule_parsing_data->data,
DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
- parsing_str = &((const char *)
- s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
- [parsing_str_offset];
+ parsing_str =
+ &((const char *)
+ s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ [parsing_str_offset];
lsi_msg = parsing_str;
+ curr_reg_id = 0;
if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
return 0;
/* Skip rule header */
- dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
+ dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
/* Update errors/warnings count */
if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
@@ -5606,19 +6090,19 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
for (i = 0;
i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
i++) {
- struct dbg_idle_chk_result_reg_hdr *reg_hdr
- = (struct dbg_idle_chk_result_reg_hdr *)
- dump_buf;
- bool is_mem =
- GET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
- u8 reg_id =
- GET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+ bool is_mem;
+ u8 reg_id;
+
+ reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
+ is_mem = GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+ reg_id = GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
/* Skip reg header */
- dump_buf +=
- (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
+ dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
/* Skip register names until the required reg_id is
* reached.
@@ -5660,6 +6144,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
/* Check if end of dump buffer was exceeded */
if (dump_buf > dump_buf_end)
return 0;
+
return results_offset;
}
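
The rule parser above now skips packed headers with dump_buf += BYTES_TO_DWORDS(sizeof(*hdr)) instead of a hard-coded divide by 4. A self-contained sketch of that idiom; the macro body here is assumed to be the usual byte-to-dword conversion:

/* Header skipping in a dword stream; BYTES_TO_DWORDS is assumed to be
 * the usual byte-to-dword conversion. Struct and names are
 * illustrative.
 */
#define BYTES_TO_DWORDS(val) ((val) / 4)

struct example_hdr {
	unsigned int rule_id;
	unsigned int data;
};

static const unsigned int *skip_example_hdr(const unsigned int *dump_buf)
{
	return dump_buf + BYTES_TO_DWORDS(sizeof(struct example_hdr));
}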
@@ -5680,13 +6165,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
const char *section_name, *param_name, *param_str_val;
u32 *dump_buf_end = dump_buf + num_dumped_dwords;
u32 num_section_params = 0, num_rules;
- u32 results_offset = 0; /* Offset in results_buf in bytes */
+
+ /* Offset in results_buf in bytes */
+ u32 results_offset = 0;
*parsed_results_bytes = 0;
*num_errors = 0;
*num_warnings = 0;
- if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
- !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+
+ if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* Read global_params section */
@@ -5705,10 +6193,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
&section_name, &num_section_params);
if (strcmp(section_name, "idle_chk") || num_section_params != 1)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
-
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &num_rules);
- if (strcmp(param_name, "num_rules") != 0)
+ if (strcmp(param_name, "num_rules"))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
if (num_rules) {
@@ -5728,7 +6215,7 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset : NULL,
num_errors, num_warnings);
results_offset += rules_print_size;
- if (rules_print_size == 0)
+ if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
/* Print LSI output */
@@ -5745,64 +6232,33 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset : NULL,
num_errors, num_warnings);
results_offset += rules_print_size;
- if (rules_print_size == 0)
+ if (!rules_print_size)
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
}
/* Print errors/warnings count */
- if (*num_errors) {
+ if (*num_errors)
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
"\nIdle Check failed!!! (with %d errors and %d warnings)\n",
*num_errors, *num_warnings);
- } else if (*num_warnings) {
+ else if (*num_warnings)
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "\nIdle Check completed successfuly (with %d warnings)\n",
+ "\nIdle Check completed successfully (with %d warnings)\n",
*num_warnings);
- } else {
+ else
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "\nIdle Check completed successfuly\n");
- }
+ "\nIdle Check completed successfully\n");
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
- return DBG_STATUS_OK;
-}
-enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- u32 num_errors, num_warnings;
-
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL,
- results_buf_size,
- &num_errors, &num_warnings);
-}
-
-enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf,
- u32 *num_errors, u32 *num_warnings)
-{
- u32 parsed_buf_size;
-
- return qed_parse_idle_chk_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf,
- &parsed_buf_size,
- num_errors, num_warnings);
+ return DBG_STATUS_OK;
}
/* Frees the specified MCP Trace meta data */
@@ -5841,12 +6297,10 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
/* Read first signature */
signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
- /* Read number of modules and allocate memory for all the modules
- * pointers.
- */
+ /* Read no. of modules and allocate memory for their pointers */
meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
if (!meta->modules)
@@ -5871,7 +6325,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
/* Read second signature */
signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
- if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ if (signature != NVM_MAGIC_VALUE)
return DBG_STATUS_INVALID_TRACE_SIGNATURE;
/* Read number of formats and allocate memory for all formats */
@@ -5919,10 +6373,10 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_mask, param_shift, param_num_val;
- u32 num_section_params, offset, end_offset, bytes_left;
+ u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords;
+ u32 param_mask, param_shift, param_num_val, num_section_params;
const char *section_name, *param_name, *param_str_val;
- u32 trace_data_dwords, trace_meta_dwords;
+ u32 offset, results_offset = 0;
struct mcp_trace_meta meta;
struct mcp_trace *trace;
enum dbg_status status;
@@ -5955,7 +6409,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Prepare trace info */
trace = (struct mcp_trace *)dump_buf;
- trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
+ trace_buf = (u8 *)dump_buf + sizeof(*trace);
offset = trace->trace_oldest;
end_offset = trace->trace_prod;
bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
@@ -5968,7 +6422,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
return DBG_STATUS_MCP_TRACE_BAD_DATA;
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &param_num_val);
- if (strcmp(param_name, "size") != 0)
+ if (strcmp(param_name, "size"))
return DBG_STATUS_MCP_TRACE_BAD_DATA;
trace_meta_dwords = param_num_val;
@@ -6028,6 +6482,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
}
format_ptr = &meta.formats[format_idx];
+
for (i = 0,
param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
@@ -6050,6 +6505,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
*/
if (param_size == 3)
param_size = 4;
+
if (bytes_left < param_size) {
status = DBG_STATUS_MCP_TRACE_BAD_DATA;
goto free_mem;
@@ -6059,13 +6515,14 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
&offset,
trace->size,
param_size);
+
bytes_left -= param_size;
}
format_level =
(u8)((format_ptr->data &
MCP_TRACE_FORMAT_LEVEL_MASK) >>
- MCP_TRACE_FORMAT_LEVEL_SHIFT);
+ MCP_TRACE_FORMAT_LEVEL_SHIFT);
format_module =
(u8)((format_ptr->data &
MCP_TRACE_FORMAT_MODULE_MASK) >>
@@ -6094,30 +6551,6 @@ free_mem:
return status;
}
-enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
-
-enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
-}
-
/* Parses a Reg FIFO dump buffer.
* If result_buf is not NULL, the Reg FIFO results are printed to it.
* In any case, the required results buffer size is assigned to
@@ -6130,10 +6563,11 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct reg_fifo_element *elements;
u8 i, j, err_val, vf_val;
+ u32 results_offset = 0;
char vf_str[4];
/* Read global_params section */
@@ -6179,17 +6613,17 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
"raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
elements[i].data,
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_ADDRESS) *
- REG_FIFO_ELEMENT_ADDR_FACTOR,
- s_access_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ADDRESS) *
+ REG_FIFO_ELEMENT_ADDR_FACTOR,
+ s_access_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ACCESS)],
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PF), vf_str,
+ REG_FIFO_ELEMENT_PF),
+ vf_str,
(u32)GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PORT),
- s_privilege_strs[GET_FIELD(elements[i].
- data,
- REG_FIFO_ELEMENT_PRIVILEGE)],
+ REG_FIFO_ELEMENT_PORT),
+ s_privilege_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PRIVILEGE)],
s_protection_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PROTECTION)],
s_master_strs[GET_FIELD(elements[i].data,
@@ -6201,18 +6635,18 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
REG_FIFO_ELEMENT_ERROR);
j < ARRAY_SIZE(s_reg_fifo_error_strs);
j++, err_val >>= 1) {
- if (!(err_val & 0x1))
- continue;
- if (err_printed)
+ if (err_val & 0x1) {
+ if (err_printed)
+ results_offset +=
+ sprintf(qed_get_buf_ptr
+ (results_buf,
+ results_offset), ", ");
results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- ", ");
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset), "%s",
- s_reg_fifo_error_strs[j]);
- err_printed = true;
+ sprintf(qed_get_buf_ptr
+ (results_buf, results_offset), "%s",
+ s_reg_fifo_error_strs[j]);
+ err_printed = true;
+ }
}
results_offset +=
@@ -6225,31 +6659,140 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
+
return DBG_STATUS_OK;
}
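
The restructured loop above prints each set bit of err_val as a comma-separated error name. The same idiom in a standalone form (illustrative names):

#include <stdio.h>

/* Comma-separated printing of set flag bits, illustrative names only. */
static void print_error_flags(unsigned int err_val,
			      const char * const names[],
			      unsigned int count)
{
	unsigned int i;
	int printed = 0;

	for (i = 0; i < count; i++, err_val >>= 1) {
		if (!(err_val & 0x1))
			continue;
		printf("%s%s", printed ? ", " : "", names[i]);
		printed = 1;
	}
}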
-enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
+static enum dbg_status
+qed_parse_igu_fifo_element(struct igu_fifo_element *element,
+			   char *results_buf,
+			   u32 *results_offset,
+			   u32 *parsed_results_bytes)
{
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
+ const struct igu_fifo_addr_data *found_addr = NULL;
+ u8 source, err_type, i, is_cleanup;
+ char parsed_addr_data[32];
+ char parsed_wr_data[256];
+ u32 wr_data, prod_cons;
+ bool is_wr_cmd, is_pf;
+ u16 cmd_addr;
+ u64 dword12;
-enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
+ /* Dword12 (dword index 1 and 2) contains bits 32..95 of the
+ * FIFO element.
+ */
+ dword12 = ((u64)element->dword2 << 32) | element->dword1;
+ is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+ is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+ cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+ source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+ err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
+
+ if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
- return qed_parse_reg_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ /* Find address data */
+ for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
+ const struct igu_fifo_addr_data *curr_addr =
+ &s_igu_fifo_addr_data[i];
+
+ if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
+ curr_addr->end_addr)
+ found_addr = curr_addr;
+ }
+
+ if (!found_addr)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Prepare parsed address data */
+ switch (found_addr->type) {
+ case IGU_ADDR_TYPE_MSIX_MEM:
+ sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
+ break;
+ case IGU_ADDR_TYPE_WRITE_INT_ACK:
+ case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+ sprintf(parsed_addr_data,
+ " SB = 0x%x", cmd_addr - found_addr->start_addr);
+ break;
+ default:
+ parsed_addr_data[0] = '\0';
+ }
+
+ if (!is_wr_cmd) {
+ parsed_wr_data[0] = '\0';
+ goto out;
+ }
+
+ /* Prepare parsed write data */
+ wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+ prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
+ is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
+
+ if (source == IGU_SRC_ATTN) {
+ sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
+ } else {
+ if (is_cleanup) {
+ u8 cleanup_val, cleanup_type;
+
+ cleanup_val =
+ GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+ cleanup_type =
+ GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
+ cleanup_val ? "set" : "clear",
+ cleanup_type);
+ } else {
+ u8 update_flag, en_dis_int_for_sb, segment;
+ u8 timer_mask;
+
+ update_flag = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_UPDATE_FLAG);
+ en_dis_int_for_sb =
+ GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+ segment = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_SEGMENT);
+ timer_mask = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_TIMER_MASK);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
+ prod_cons,
+ update_flag ? "update" : "nop",
+ en_dis_int_for_sb
+ ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
+ : "enable",
+ segment ? "attn" : "regular",
+ timer_mask);
+ }
+ }
+out:
+ /* Add parsed element to parsed buffer */
+ *results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ *results_offset),
+ "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
+ element->dword2, element->dword1,
+ element->dword0,
+ is_pf ? "pf" : "vf",
+ GET_FIELD(element->dword0,
+ IGU_FIFO_ELEMENT_DWORD0_FID),
+ s_igu_fifo_source_strs[source],
+ is_wr_cmd ? "wr" : "rd",
+ cmd_addr,
+ (!is_pf && found_addr->vf_desc)
+ ? found_addr->vf_desc
+ : found_addr->desc,
+ parsed_addr_data,
+ parsed_wr_data,
+ s_igu_fifo_error_strs[err_type]);
+
+ return DBG_STATUS_OK;
}
/* Parses an IGU FIFO dump buffer.
@@ -6264,12 +6807,12 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct igu_fifo_element *elements;
- char parsed_addr_data[32];
- char parsed_wr_data[256];
- u8 i, j;
+ enum dbg_status status;
+ u32 results_offset = 0;
+ u8 i;
/* Read global_params section */
dump_buf += qed_read_section_hdr(dump_buf,
@@ -6298,118 +6841,12 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
/* Decode elements */
for (i = 0; i < num_elements; i++) {
- /* dword12 (dword index 1 and 2) contains bits 32..95 of the
- * FIFO element.
- */
- u64 dword12 =
- ((u64)elements[i].dword2 << 32) | elements[i].dword1;
- bool is_wr_cmd = GET_FIELD(dword12,
- IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
- bool is_pf = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_IS_PF);
- u16 cmd_addr = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
- u8 source = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_SOURCE);
- u8 err_type = GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
- const struct igu_fifo_addr_data *addr_data = NULL;
-
- if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
- if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
-
- /* Find address data */
- for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
- j++)
- if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
- cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
- addr_data = &s_igu_fifo_addr_data[j];
- if (!addr_data)
- return DBG_STATUS_IGU_FIFO_BAD_DATA;
-
- /* Prepare parsed address data */
- switch (addr_data->type) {
- case IGU_ADDR_TYPE_MSIX_MEM:
- sprintf(parsed_addr_data,
- " vector_num=0x%x", cmd_addr / 2);
- break;
- case IGU_ADDR_TYPE_WRITE_INT_ACK:
- case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
- sprintf(parsed_addr_data,
- " SB=0x%x", cmd_addr - addr_data->start_addr);
- break;
- default:
- parsed_addr_data[0] = '\0';
- }
-
- /* Prepare parsed write data */
- if (is_wr_cmd) {
- u32 wr_data = GET_FIELD(dword12,
- IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
- u32 prod_cons = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_PROD_CONS);
- u8 is_cleanup = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_CMD_TYPE);
-
- if (source == IGU_SRC_ATTN) {
- sprintf(parsed_wr_data,
- "prod: 0x%x, ", prod_cons);
- } else {
- if (is_cleanup) {
- u8 cleanup_val = GET_FIELD(wr_data,
- IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
- u8 cleanup_type = GET_FIELD(wr_data,
- IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
-
- sprintf(parsed_wr_data,
- "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
- cleanup_val ? "set" : "clear",
- cleanup_type);
- } else {
- u8 update_flag = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_UPDATE_FLAG);
- u8 en_dis_int_for_sb =
- GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
- u8 segment = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_SEGMENT);
- u8 timer_mask = GET_FIELD(wr_data,
- IGU_FIFO_WR_DATA_TIMER_MASK);
-
- sprintf(parsed_wr_data,
- "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
- prod_cons,
- update_flag ? "update" : "nop",
- en_dis_int_for_sb
- ? (en_dis_int_for_sb ==
- 1 ? "disable" : "nop") :
- "enable",
- segment ? "attn" : "regular",
- timer_mask);
- }
- }
- } else {
- parsed_wr_data[0] = '\0';
- }
-
- /* Add parsed element to parsed buffer */
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
- elements[i].dword2, elements[i].dword1,
- elements[i].dword0,
- is_pf ? "pf" : "vf",
- GET_FIELD(elements[i].dword0,
- IGU_FIFO_ELEMENT_DWORD0_FID),
- s_igu_fifo_source_strs[source],
- is_wr_cmd ? "wr" : "rd", cmd_addr,
- (!is_pf && addr_data->vf_desc)
- ? addr_data->vf_desc : addr_data->desc,
- parsed_addr_data, parsed_wr_data,
- s_igu_fifo_error_strs[err_type]);
+ status = qed_parse_igu_fifo_element(&elements[i],
+ results_buf,
+ &results_offset,
+ parsed_results_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
}
results_offset += sprintf(qed_get_buf_ptr(results_buf,
@@ -6418,31 +6855,8 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
- return DBG_STATUS_OK;
-}
-enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
-
-enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_igu_fifo_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf, &parsed_buf_size);
+ return DBG_STATUS_OK;
}
static enum dbg_status
@@ -6452,9 +6866,10 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, param_num_val, num_section_params, num_elements;
const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
struct protection_override_element *elements;
+ u32 results_offset = 0;
u8 i;
/* Read global_params section */
@@ -6477,7 +6892,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
&param_name, &param_str_val, &param_num_val);
if (strcmp(param_name, "size"))
return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
- if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
+ if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
elements = (struct protection_override_element *)dump_buf;
@@ -6486,7 +6901,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_elements; i++) {
u32 address = GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
- PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+ PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
@@ -6512,33 +6927,8 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
- return DBG_STATUS_OK;
-}
-enum dbg_status
-qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size)
-{
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- NULL, results_buf_size);
-}
-
-enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_protection_override_dump(p_hwfn,
- dump_buf,
- num_dumped_dwords,
- results_buf,
- &parsed_buf_size);
+ return DBG_STATUS_OK;
}
/* Parses a FW Asserts dump buffer.
@@ -6553,7 +6943,7 @@ static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *parsed_results_bytes)
{
- u32 results_offset = 0, num_section_params, param_num_val, i;
+ u32 num_section_params, param_num_val, i, results_offset = 0;
const char *param_name, *param_str_val, *section_name;
bool last_section_found = false;
@@ -6569,54 +6959,216 @@ static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
dump_buf += qed_print_section_params(dump_buf,
num_section_params,
results_buf, &results_offset);
- while (!last_section_found) {
- const char *storm_letter = NULL;
- u32 storm_dump_size = 0;
+ while (!last_section_found) {
dump_buf += qed_read_section_hdr(dump_buf,
&section_name,
&num_section_params);
- if (!strcmp(section_name, "last")) {
- last_section_found = true;
- continue;
- } else if (strcmp(section_name, "fw_asserts")) {
- return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
- }
+ if (!strcmp(section_name, "fw_asserts")) {
+ /* Extract params */
+ const char *storm_letter = NULL;
+ u32 storm_dump_size = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &param_num_val);
+ if (!strcmp(param_name, "storm"))
+ storm_letter = param_str_val;
+ else if (!strcmp(param_name, "size"))
+ storm_dump_size = param_num_val;
+ else
+ return
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
- /* Extract params */
- for (i = 0; i < num_section_params; i++) {
- dump_buf += qed_read_param(dump_buf,
- &param_name,
- &param_str_val,
- &param_num_val);
- if (!strcmp(param_name, "storm"))
- storm_letter = param_str_val;
- else if (!strcmp(param_name, "size"))
- storm_dump_size = param_num_val;
- else
+ if (!storm_letter || !storm_dump_size)
return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
- }
-
- if (!storm_letter || !storm_dump_size)
- return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
- /* Print data */
- results_offset += sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- "\n%sSTORM_ASSERT: size=%d\n",
- storm_letter, storm_dump_size);
- for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ /* Print data */
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "%08x\n", *dump_buf);
+ "\n%sSTORM_ASSERT: size=%d\n",
+ storm_letter, storm_dump_size);
+ for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%08x\n", *dump_buf);
+ } else if (!strcmp(section_name, "last")) {
+ last_section_found = true;
+ } else {
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
}
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
+
+ return DBG_STATUS_OK;
+}
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+{
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
+ u8 buf_id;
+
+ /* Convert binary data to debug arrays */
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
+ s_user_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_user_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
return DBG_STATUS_OK;
}
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+	return (status < MAX_DBG_STATUS) ?
+	       s_status_str[status] : "Invalid debug status";
+}
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL,
+ results_buf_size,
+ &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size,
+ num_errors, num_warnings);
+}
+
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
+{
+ s_mcp_trace_meta.ptr = data;
+ s_mcp_trace_meta.size_in_dwords = size;
+}
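
qed_dbg_mcp_trace_set_meta_data() gives a caller a way to supply trace meta data out of band, for the case noted earlier where the dump itself carries none (e.g. no NVRAM access). A hedged usage sketch; load_meta_image() is a hypothetical helper, not part of the driver:

/* Hedged usage sketch: load_meta_image() is hypothetical; only
 * qed_dbg_mcp_trace_set_meta_data() is the driver's API.
 */
static void example_register_meta(void)
{
	u32 size_in_dwords;
	u32 *meta = load_meta_image("mfw_trace_meta.bin", &size_in_dwords);

	if (meta)
		qed_dbg_mcp_trace_set_meta_data(meta, size_in_dwords);
}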
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size);
+}
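
Each feature's public pair above, a *_results_buf_size() query and a *_print_*() call, is a thin wrapper over one parser that takes an optional output buffer. The condensed pattern, with illustrative names:

/* One parser, two thin wrappers: the shape of the public functions
 * above. Illustrative names only.
 */
static int parse_dump(const unsigned int *dump_buf, char *results_buf,
		      unsigned int *results_bytes)
{
	/* Writes text only when results_buf is non-NULL, but always
	 * reports the size the text would need.
	 */
	*results_bytes = 128;
	if (results_buf)
		results_buf[0] = '\0';

	return 0;
}

static int get_results_buf_size(const unsigned int *dump_buf,
				unsigned int *results_buf_size)
{
	return parse_dump(dump_buf, NULL, results_buf_size);
}

static int print_results(const unsigned int *dump_buf, char *results_buf)
{
	unsigned int parsed_buf_size;

	return parse_dump(dump_buf, results_buf, &parsed_buf_size);
}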
+
enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index f872d7324814..ea1cc8eaa125 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -20,6 +20,9 @@ enum qed_dbg_features {
DBG_FEATURE_NUM
};
+/* Forward Declaration */
+struct qed_dev;
+
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_grc_size(struct qed_dev *cdev);
int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 463927f17032..072d950cd8ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -161,6 +161,7 @@ void qed_resc_free(struct qed_dev *cdev)
cdev->fw_data = NULL;
kfree(cdev->reset_stats);
+ cdev->reset_stats = NULL;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -168,18 +169,18 @@ void qed_resc_free(struct qed_dev *cdev)
qed_cxt_mngr_free(p_hwfn);
qed_qm_info_free(p_hwfn);
qed_spq_free(p_hwfn);
- qed_eq_free(p_hwfn, p_hwfn->p_eq);
- qed_consq_free(p_hwfn, p_hwfn->p_consq);
+ qed_eq_free(p_hwfn);
+ qed_consq_free(p_hwfn);
qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
- qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+ qed_ll2_free(p_hwfn);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
- qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);
+ qed_fcoe_free(p_hwfn);
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
- qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+ qed_iscsi_free(p_hwfn);
+ qed_ooo_free(p_hwfn);
}
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
@@ -299,7 +300,7 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
qm_info->vport_wfq_en = 1;
/* TC config is different for AH 4 port */
- four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+ four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;
/* in AH 4 port we have fewer TCs per port */
qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
@@ -328,7 +329,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
- u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -692,7 +693,7 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
/* port table */
- for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+ for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
port = &(qm_info->qm_port_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
@@ -822,7 +823,7 @@ static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
goto alloc_err;
qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- p_hwfn->cdev->num_ports_in_engines,
+ p_hwfn->cdev->num_ports_in_engine,
GFP_KERNEL);
if (!qm_info->qm_port_params)
goto alloc_err;
@@ -843,15 +844,7 @@ alloc_err:
int qed_resc_alloc(struct qed_dev *cdev)
{
- struct qed_iscsi_info *p_iscsi_info;
- struct qed_fcoe_info *p_fcoe_info;
- struct qed_ooo_info *p_ooo_info;
-#ifdef CONFIG_QED_LL2
- struct qed_ll2_info *p_ll2_info;
-#endif
u32 rdma_tasks, excess_tasks;
- struct qed_consq *p_consq;
- struct qed_eq *p_eq;
u32 line_count;
int i, rc = 0;
@@ -956,45 +949,38 @@ int qed_resc_alloc(struct qed_dev *cdev)
DP_ERR(p_hwfn,
"Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
n_eqes, 0xFFFF);
- rc = -EINVAL;
- goto alloc_err;
+ goto alloc_no_mem;
}
- p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
- if (!p_eq)
- goto alloc_no_mem;
- p_hwfn->p_eq = p_eq;
+ rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ if (rc)
+ goto alloc_err;
- p_consq = qed_consq_alloc(p_hwfn);
- if (!p_consq)
- goto alloc_no_mem;
- p_hwfn->p_consq = p_consq;
+ rc = qed_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2) {
- p_ll2_info = qed_ll2_alloc(p_hwfn);
- if (!p_ll2_info)
- goto alloc_no_mem;
- p_hwfn->p_ll2_info = p_ll2_info;
+ rc = qed_ll2_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
- p_fcoe_info = qed_fcoe_alloc(p_hwfn);
- if (!p_fcoe_info)
- goto alloc_no_mem;
- p_hwfn->p_fcoe_info = p_fcoe_info;
+ rc = qed_fcoe_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- p_iscsi_info = qed_iscsi_alloc(p_hwfn);
- if (!p_iscsi_info)
- goto alloc_no_mem;
- p_hwfn->p_iscsi_info = p_iscsi_info;
- p_ooo_info = qed_ooo_alloc(p_hwfn);
- if (!p_ooo_info)
- goto alloc_no_mem;
- p_hwfn->p_ooo_info = p_ooo_info;
+ rc = qed_iscsi_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ rc = qed_ooo_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
/* DMA info initialization */
@@ -1033,8 +1019,8 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_cxt_mngr_setup(p_hwfn);
qed_spq_setup(p_hwfn);
- qed_eq_setup(p_hwfn, p_hwfn->p_eq);
- qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+ qed_eq_setup(p_hwfn);
+ qed_consq_setup(p_hwfn);
/* Read shadow of current MFW mailbox */
qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
@@ -1047,14 +1033,14 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2)
- qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+ qed_ll2_setup(p_hwfn);
#endif
if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
- qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info);
+ qed_fcoe_setup(p_hwfn);
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
- qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
+ qed_iscsi_setup(p_hwfn);
+ qed_ooo_setup(p_hwfn);
}
}
}
@@ -1122,7 +1108,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
- switch (p_hwfn->cdev->num_ports_in_engines) {
+ switch (p_hwfn->cdev->num_ports_in_engine) {
case 1:
hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
break;
@@ -1134,7 +1120,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
break;
default:
DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
- p_hwfn->cdev->num_ports_in_engines);
+ p_hwfn->cdev->num_ports_in_engine);
return -EINVAL;
}
@@ -1267,7 +1253,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
}
memset(&params, 0, sizeof(params));
- params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+ params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.pf_rl_en = qm_info->pf_rl_en;
params.pf_wfq_en = qm_info->pf_wfq_en;
@@ -1527,7 +1513,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
- rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+ rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
+ p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
@@ -1711,6 +1698,11 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
return mfw_rc;
}
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn,
QED_MSG_DCB,
@@ -1956,6 +1948,13 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
if (!p_ptt)
return -EAGAIN;
+ /* If roce info is allocated it means roce is initialized and should
+ * be enabled in searcher.
+ */
+ if (p_hwfn->p_rdma_info &&
+ p_hwfn->b_rdma_enabled_in_prs)
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
+
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
qed_ptt_release(p_hwfn, p_ptt);
@@ -1968,6 +1967,7 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
qed_ptt_pool_free(p_hwfn);
kfree(p_hwfn->hw_info.p_igu_info);
+ p_hwfn->hw_info.p_igu_info = NULL;
}
/* Setup bar access */
@@ -2252,7 +2252,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
case QED_BDQ:
if (!*p_resc_num)
*p_resc_start = 0;
- else if (p_hwfn->cdev->num_ports_in_engines == 4)
+ else if (p_hwfn->cdev->num_ports_in_engine == 4)
*p_resc_start = p_hwfn->port_id;
else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
*p_resc_start = p_hwfn->port_id;
@@ -2669,15 +2669,15 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
if (port_mode < 3) {
- p_hwfn->cdev->num_ports_in_engines = 1;
+ p_hwfn->cdev->num_ports_in_engine = 1;
} else if (port_mode <= 5) {
- p_hwfn->cdev->num_ports_in_engines = 2;
+ p_hwfn->cdev->num_ports_in_engine = 2;
} else {
DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
- p_hwfn->cdev->num_ports_in_engines);
+ p_hwfn->cdev->num_ports_in_engine);
- /* Default num_ports_in_engines to something */
- p_hwfn->cdev->num_ports_in_engines = 1;
+ /* Default num_ports_in_engine to something */
+ p_hwfn->cdev->num_ports_in_engine = 1;
}
}
@@ -2687,20 +2687,20 @@ static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
u32 port;
int i;
- p_hwfn->cdev->num_ports_in_engines = 0;
+ p_hwfn->cdev->num_ports_in_engine = 0;
for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
port = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
if (port & 1)
- p_hwfn->cdev->num_ports_in_engines++;
+ p_hwfn->cdev->num_ports_in_engine++;
}
- if (!p_hwfn->cdev->num_ports_in_engines) {
+ if (!p_hwfn->cdev->num_ports_in_engine) {
DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
/* Default num_ports_in_engine to something */
- p_hwfn->cdev->num_ports_in_engines = 1;
+ p_hwfn->cdev->num_ports_in_engine = 1;
}
}
@@ -2819,12 +2819,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
- if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
- DP_NOTICE(cdev->hwfns,
- "The chip type/rev (BB A0) is not supported!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -4074,7 +4068,7 @@ static int qed_device_num_ports(struct qed_dev *cdev)
if (cdev->num_hwfns > 1)
return 1;
- return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
+ return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
}
int qed_device_get_port_id(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index 21a58fffd02b..cb342f16c137 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -539,7 +538,7 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
}
}
-struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_fcoe_info *p_fcoe_info;
@@ -547,19 +546,21 @@ struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
if (!p_fcoe_info) {
DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n");
- return NULL;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&p_fcoe_info->free_list);
- return p_fcoe_info;
+
+ p_hwfn->p_fcoe_info = p_fcoe_info;
+ return 0;
}
-void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
struct fcoe_task_context *p_task_ctx = NULL;
int rc;
u32 i;
- spin_lock_init(&p_fcoe_info->lock);
+ spin_lock_init(&p_hwfn->p_fcoe_info->lock);
for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
rc = qed_cxt_get_task_ctx(p_hwfn, i,
QED_CTX_WORKING_MEM,
@@ -577,15 +578,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
}
}
-void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
+void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
struct qed_fcoe_conn *p_conn = NULL;
- if (!p_fcoe_info)
+ if (!p_hwfn->p_fcoe_info)
return;
- while (!list_empty(&p_fcoe_info->free_list)) {
- p_conn = list_first_entry(&p_fcoe_info->free_list,
+ while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
struct qed_fcoe_conn, list_entry);
if (!p_conn)
break;
@@ -593,7 +594,8 @@ void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
qed_fcoe_free_connection(p_hwfn, p_conn);
}
- kfree(p_fcoe_info);
+ kfree(p_hwfn->p_fcoe_info);
+ p_hwfn->p_fcoe_info = NULL;
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
index 472af34a171d..027a76ac839a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
@@ -49,29 +49,21 @@ struct qed_fcoe_info {
};
#if IS_ENABLED(CONFIG_QED_FCOE)
-struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
-void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
-void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info);
+void qed_fcoe_free(struct qed_hwfn *p_hwfn);
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats);
#else /* CONFIG_QED_FCOE */
-static inline struct qed_fcoe_info *
-qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
- return NULL;
+ return -EINVAL;
}
-static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn,
- struct qed_fcoe_info *p_fcoe_info)
-{
-}
-
-static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn,
- struct qed_fcoe_info *p_fcoe_info)
-{
-}
+static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 858a57a73589..802c162d8474 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -346,7 +346,7 @@ struct xstorm_core_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -368,85 +368,85 @@ struct tstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
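The TSTORM_CORE_CONN_AG_CTX_*_MASK/_SHIFT pairs above describe sub-fields packed into the flags0..flags5 bytes; the added comments only restore the field names (timer0cf, cf4en, rule0en, ...) that identify each slot. qed reads and writes such fields through its GET_FIELD()/SET_FIELD() helpers, which append _MASK and _SHIFT to the macro name. A sketch, assuming p_ag points at a populated context:

	/* Read the 2-bit CF0 field and raise the BIT1 flag in flags0 */
	u8 cf0 = GET_FIELD(p_ag->flags0, TSTORM_CORE_CONN_AG_CTX_CF0);

	SET_FIELD(p_ag->flags0, TSTORM_CORE_CONN_AG_CTX_BIT1, 1);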
@@ -681,7 +681,9 @@ struct core_rx_fast_path_cqe {
__le16 packet_length;
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
- __le32 reserved[4];
+ struct parsing_err_flags err_flags;
+ __le16 reserved0;
+ __le32 reserved1[3];
};
struct core_rx_gsi_offload_cqe {
@@ -692,7 +694,7 @@ struct core_rx_gsi_offload_cqe {
__le16 vlan;
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
- u8 reserved1[2];
+ __le16 qp_id;
__le32 gid_dst[4];
};
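Both CQE changes above repurpose reserved space without moving any existing member: in the fast-path CQE, the 16-byte reserved[4] becomes err_flags plus reserved0 plus reserved1[3], and in the GSI offload CQE the two reserved bytes become a __le16 qp_id. Assuming struct parsing_err_flags wraps a single __le16 (its definition is outside this hunk), the size bookkeeping can be pinned with a build-time check such as:

	/* Hypothetical sanity check: the carve-out must still total 16 bytes */
	BUILD_BUG_ON(sizeof(struct parsing_err_flags) + sizeof(__le16) +
		     3 * sizeof(__le32) != 4 * sizeof(__le32));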
@@ -774,15 +776,15 @@ struct core_tx_bd {
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK 0x1
-#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED_MASK 0x1
-#define CORE_TX_BD_RESERVED_SHIFT 15
+#define CORE_TX_BD_TX_DST_MASK 0x3
+#define CORE_TX_BD_TX_DST_SHIFT 14
};
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
+ CORE_TX_DEST_RESERVED,
+ CORE_TX_DEST_DROP,
MAX_CORE_TX_DEST
};
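Widening CORE_TX_BD_TX_DST from one bit to two absorbs the adjacent reserved bit and makes room for the two new core_tx_dest values, notably the drop destination. A sketch of setting the field in a TX BD, with the le16 conversion qed normally performs around SET_FIELD():

	u16 bitfield1 = le16_to_cpu(bd->bitfield1);

	/* Route this BD to the new drop destination */
	SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, CORE_TX_DEST_DROP);
	bd->bitfield1 = cpu_to_le16(bitfield1);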
@@ -804,12 +806,12 @@ struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
-enum dcb_dhcp_update_flag {
- DONT_UPDATE_DCB_DHCP,
+enum dcb_dscp_update_mode {
+ DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
UPDATE_DSCP,
UPDATE_DCB_DSCP,
- MAX_DCB_DHCP_UPDATE_FLAG
+ MAX_DCB_DSCP_UPDATE_MODE
};
struct eth_mstorm_per_pf_stat {
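The rename from dcb_dhcp_update_flag to dcb_dscp_update_mode fixes the DHCP/DSCP typo and reframes the values as per-protocol update modes; the pf_update_ramrod_data hunk later in this file renames its update_*_dcb_data_flag fields to *_mode to match. Usage then looks like the following sketch (p_ramrod assumed to be a struct pf_update_ramrod_data being prepared):

	/* Request a combined DCB + DSCP refresh for the ETH protocol */
	p_ramrod->update_eth_dcb_data_mode = UPDATE_DCB_DSCP;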
@@ -917,6 +919,14 @@ struct hsi_fp_ver_struct {
u8 major_ver_arr[2];
};
+enum iwarp_ll2_tx_queues {
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+ IWARP_LL2_ALIGNED_TX_QUEUE,
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
+ IWARP_LL2_ERROR,
+ MAX_IWARP_LL2_TX_QUEUES
+};
+
/* Mstorm non-triggering VF zone */
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR,
@@ -960,7 +970,7 @@ enum personality_type {
PERSONALITY_ISCSI,
PERSONALITY_FCOE,
PERSONALITY_RDMA_AND_ETH,
- PERSONALITY_RESERVED3,
+ PERSONALITY_RDMA,
PERSONALITY_CORE,
PERSONALITY_ETH,
PERSONALITY_RESERVED4,
@@ -971,16 +981,12 @@ enum personality_type {
struct pf_start_tunnel_config {
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre;
- u8 tx_enable_ipgre;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
+ u8 reserved;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
};
@@ -990,6 +996,7 @@ struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
struct regpair consolid_q_pbl_addr;
struct pf_start_tunnel_config tunnel_config;
+ __le32 reserved;
__le16 event_ring_sb_id;
u8 base_vf_id;
u8 num_vfs;
@@ -1007,7 +1014,6 @@ struct pf_start_ramrod_data {
u8 pri_map_valid;
__le32 outer_tag;
struct hsi_fp_ver_struct hsi_fp_ver;
-
};
struct protocol_dcb_data {
@@ -1023,14 +1029,8 @@ struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
u8 update_rx_def_ucast_clss;
u8 update_rx_def_non_ucast_clss;
- u8 update_tx_pf_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre;
- u8 tx_enable_ipgre;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
@@ -1038,17 +1038,17 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
- __le16 reserved[2];
+ __le16 reserved;
};
struct pf_update_ramrod_data {
u8 pf_id;
- u8 update_eth_dcb_data_flag;
- u8 update_fcoe_dcb_data_flag;
- u8 update_iscsi_dcb_data_flag;
- u8 update_roce_dcb_data_flag;
- u8 update_rroce_dcb_data_flag;
- u8 update_iwarp_dcb_data_flag;
+ u8 update_eth_dcb_data_mode;
+ u8 update_fcoe_dcb_data_mode;
+ u8 update_iscsi_dcb_data_mode;
+ u8 update_roce_dcb_data_mode;
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode;
u8 update_mf_vlan_flag;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
@@ -1127,7 +1127,7 @@ struct tstorm_per_port_stat {
struct regpair iscsi_irregular_pkt;
struct regpair fcoe_irregular_pkt;
struct regpair roce_irregular_pkt;
- struct regpair reserved;
+ struct regpair iwarp_irregular_pkt;
struct regpair eth_irregular_pkt;
struct regpair reserved1;
struct regpair preroce_irregular_pkt;
@@ -1326,6 +1326,87 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
/* IGU cleanup command */
struct igu_cleanup {
__le32 sb_id_and_flags;
@@ -1389,44 +1470,6 @@ struct igu_msix_vector {
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
-
-struct mstorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0;
- __le16 word1;
- __le32 reg0;
- __le32 reg1;
-};
-
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -1541,50 +1584,6 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
-struct ystorm_core_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2;
- u8 byte3;
- __le16 word0;
- __le32 reg0;
- __le32 reg1;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le16 word4;
- __le32 reg2;
- __le32 reg3;
-};
-
/****************************************/
/* Debug Tools HSI constants and macros */
/****************************************/
@@ -1643,6 +1642,8 @@ enum block_addr {
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PTLD = 0x590000,
+ GRCBASE_YPLD = 0x5b0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -1656,6 +1657,10 @@ enum block_addr {
GRCBASE_TCFC = 0x2d0000,
GRCBASE_IGU = 0x180000,
GRCBASE_CAU = 0x1c0000,
+ GRCBASE_RGFS = 0xf00000,
+ GRCBASE_RGSRC = 0x320000,
+ GRCBASE_TGFS = 0xd00000,
+ GRCBASE_TGSRC = 0x322000,
GRCBASE_UMAC = 0x51000,
GRCBASE_XMAC = 0x210000,
GRCBASE_DBG = 0x10000,
@@ -1669,10 +1674,6 @@ enum block_addr {
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_RGFS = 0x19d0000,
- GRCBASE_TGFS = 0x19e0000,
- GRCBASE_PTLD = 0x19f0000,
- GRCBASE_YPLD = 0x1a10000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1732,6 +1733,8 @@ enum block_id {
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -1745,6 +1748,10 @@ enum block_id {
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
BLOCK_DBG,
@@ -1758,10 +1765,6 @@ enum block_id {
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
- BLOCK_RGFS,
- BLOCK_TGFS,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1780,6 +1783,10 @@ enum bin_dbg_buffer_type {
BIN_BUF_DBG_ATTN_REGS,
BIN_BUF_DBG_ATTN_INDEXES,
BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_BUS_BLOCKS,
+ BIN_BUF_DBG_BUS_LINES,
+ BIN_BUF_DBG_BUS_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
BIN_BUF_DBG_PARSING_STRINGS,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -1862,6 +1869,29 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
+struct dbg_bus_block {
+ u8 num_of_lines;
+ u8 has_latency_events;
+ __le16 lines_offset;
+};
+
+struct dbg_bus_block_user_data {
+ u8 num_of_lines;
+ u8 has_latency_events;
+ __le16 names_offset;
+};
+
+struct dbg_bus_line {
+ u8 data;
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+ u8 group_sizes;
+};
+
/* condition header for registers dump */
struct dbg_dump_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
@@ -1879,17 +1909,21 @@ struct dbg_dump_mem {
__le32 dword1;
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
-#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
/* register data for registers dump */
struct dbg_dump_reg {
__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
/* split header for registers dump */
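Both dump descriptors above shrink a field by one bit to expose a wide-bus indicator: dbg_dump_mem carves bit 24 out of its reserved byte, and dbg_dump_reg narrows the address from 24 to 23 bits so bit 23 can flag wide-bus registers while the length stays in bits 24-31. Decoding a register descriptor under the new layout is then, as a sketch (p_reg assumed valid):

	u32 data = le32_to_cpu(p_reg->data);
	u32 addr = GET_FIELD(data, DBG_DUMP_REG_ADDRESS);	/* in dwords */
	bool wide_bus = GET_FIELD(data, DBG_DUMP_REG_WIDE_BUS);
	u32 len = GET_FIELD(data, DBG_DUMP_REG_LENGTH);		/* in dwords */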
@@ -1910,20 +1944,24 @@ struct dbg_idle_chk_cond_hdr {
/* Idle Check condition register */
struct dbg_idle_chk_cond_reg {
__le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- __le16 num_entries; /* number of registers entries to check */
- u8 entry_size; /* size of registers entry (in dwords) */
- u8 start_entry; /* index of the first entry to check */
+ __le16 num_entries;
+ u8 entry_size;
+ u8 start_entry;
};
/* Idle Check info register */
struct dbg_idle_chk_info_reg {
__le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
__le16 size; /* register size in dwords */
@@ -1996,15 +2034,17 @@ enum dbg_idle_chk_severity_types {
/* Debug Bus block data */
struct dbg_bus_block_data {
- u8 enabled; /* Indicates if the block is enabled for recording (0/1) */
- u8 hw_id; /* HW ID associated with the block */
- u8 line_num; /* Debug line number to select */
- u8 right_shift; /* Number of units to right the debug data (0-3) */
- u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */
- u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */
- u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced.
- */
- u8 reserved;
+ __le16 data;
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+ u8 line_num;
+ u8 hw_id;
};
/* Debug Bus Clients */
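The per-block debug-bus state above collapses four one-nibble u8 fields (enable, right-shift, force-valid, force-frame) into a single packed __le16, halving each entry of the blocks[88] array in dbg_bus_data below. A sketch of programming one block with the new layout (p_block assumed valid; line and hw_id are placeholder values):

	u16 data = 0;

	/* Enable all four units of the block, no right shift */
	SET_FIELD(data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0xf);
	SET_FIELD(data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, 0);
	p_block->data = cpu_to_le16(data);
	p_block->line_num = line;
	p_block->hw_id = hw_id;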
@@ -2045,6 +2085,14 @@ enum dbg_bus_constraint_ops {
MAX_DBG_BUS_CONSTRAINT_OPS
};
+struct dbg_bus_trigger_state_data {
+ u8 data;
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+};
+
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
__le32 lo;
@@ -2078,66 +2126,42 @@ union dbg_bus_storm_eid_params {
/* Debug Bus Storm data */
struct dbg_bus_storm_data {
- u8 fast_enabled;
- u8 fast_mode;
- u8 slow_enabled;
- u8 slow_mode;
+ u8 enabled;
+ u8 mode;
u8 hw_id;
u8 eid_filter_en;
u8 eid_range_not_mask;
u8 cid_filter_en;
union dbg_bus_storm_eid_params eid_filter_params;
- __le16 reserved;
__le32 cid;
};
/* Debug Bus data */
struct dbg_bus_data {
- __le32 app_version; /* The tools version number of the application */
- u8 state; /* The current debug bus state */
- u8 hw_dwords; /* HW dwords per cycle */
- u8 next_hw_id; /* Next HW ID to be associated with an input */
- u8 num_enabled_blocks; /* Number of blocks enabled for recording */
- u8 num_enabled_storms; /* Number of Storms enabled for recording */
- u8 target; /* Output target */
- u8 next_trigger_state; /* ID of next trigger state to be added */
- u8 next_constraint_id; /* ID of next filter/trigger constraint to be
- * added.
- */
- u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */
- u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */
- u8 timestamp_input_en; /* Indicates if timestamp recording is enabled
- * (0/1).
- */
- u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */
- u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */
- u8 adding_filter; /* If true, the next added constraint belong to the
- * filter. Otherwise, it belongs to the last added
- * trigger state. Valid only if either filter or
- * triggers are enabled.
- */
- u8 filter_pre_trigger; /* Indicates if the recording filter should be
- * applied before the trigger. Valid only if both
- * filter and trigger are enabled (0/1).
- */
- u8 filter_post_trigger; /* Indicates if the recording filter should be
- * applied after the trigger. Valid only if both
- * filter and trigger are enabled (0/1).
- */
- u8 unify_inputs; /* If true, all inputs are associated with HW ID 0.
- * Otherwise, each input is assigned a different HW ID
- * (0/1).
- */
- u8 rcv_from_other_engine; /* Indicates if the other engine sends it NW
- * recording to this engine (0/1).
- */
- struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid
- * only when the target is
- * DBG_BUS_TARGET_ID_PCI.
- */
+ __le32 app_version;
+ u8 state;
+ u8 hw_dwords;
+ __le16 hw_id_mask;
+ u8 num_enabled_blocks;
+ u8 num_enabled_storms;
+ u8 target;
+ u8 one_shot_en;
+ u8 grc_input_en;
+ u8 timestamp_input_en;
+ u8 filter_en;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
__le16 reserved;
- struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
- struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */
+ u8 trigger_en;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 next_trigger_state;
+ u8 next_constraint_id;
+ u8 unify_inputs;
+ u8 rcv_from_other_engine;
+ struct dbg_bus_pci_buf_data pci_buf;
+ struct dbg_bus_block_data blocks[88];
+ struct dbg_bus_storm_data storms[6];
};
enum dbg_bus_filter_types {
@@ -2156,12 +2180,6 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
-enum dbg_bus_input_types {
- DBG_BUS_INPUT_TYPE_STORM,
- DBG_BUS_INPUT_TYPE_BLOCK,
- MAX_DBG_BUS_INPUT_TYPES
-};
-
enum dbg_bus_other_engine_modes {
DBG_BUS_OTHER_ENGINE_MODE_NONE,
DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2185,19 +2203,19 @@ enum dbg_bus_pre_trigger_types {
};
enum dbg_bus_semi_frame_modes {
- DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
- DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
MAX_DBG_BUS_SEMI_FRAME_MODES
};
/* Debug bus states */
enum dbg_bus_states {
- DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
- DBG_BUS_STATE_READY, /* debug bus is ready for configuration and
- * recording.
- */
- DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */
- DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */
+ DBG_BUS_STATE_IDLE,
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING,
+ DBG_BUS_STATE_STOPPED,
MAX_DBG_BUS_STATES
};
@@ -2216,11 +2234,8 @@ enum dbg_bus_storm_modes {
/* Debug bus target IDs */
enum dbg_bus_targets {
- /* records debug bus to DBG block internal buffer */
DBG_BUS_TARGET_ID_INT_BUF,
- /* records debug bus to the NW */
DBG_BUS_TARGET_ID_NIG,
- /* records debug bus to a PCI buffer */
DBG_BUS_TARGET_ID_PCI,
MAX_DBG_BUS_TARGETS
};
@@ -2235,48 +2250,45 @@ struct dbg_grc_data {
/* Debug GRC params */
enum dbg_grc_params {
- DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */
- DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */
- DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */
- DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */
- DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */
- DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */
- DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */
- DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */
- DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */
- DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */
- DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */
- DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */
- DBG_GRC_PARAM_RESERVED, /* reserved */
- DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */
- DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */
- DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */
- DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */
- DBG_GRC_PARAM_DUMP_BMB, /* dump BMB memories (0/1) */
- DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */
- DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */
- DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */
- DBG_GRC_PARAM_DUMP_DMAE, /* dump PRS memories (0/1) */
- DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */
- DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */
- DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */
- DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */
- DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */
- DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */
- DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */
- /* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_DUMP_TSTORM,
+ DBG_GRC_PARAM_DUMP_MSTORM,
+ DBG_GRC_PARAM_DUMP_USTORM,
+ DBG_GRC_PARAM_DUMP_XSTORM,
+ DBG_GRC_PARAM_DUMP_YSTORM,
+ DBG_GRC_PARAM_DUMP_PSTORM,
+ DBG_GRC_PARAM_DUMP_REGS,
+ DBG_GRC_PARAM_DUMP_RAM,
+ DBG_GRC_PARAM_DUMP_PBUF,
+ DBG_GRC_PARAM_DUMP_IOR,
+ DBG_GRC_PARAM_DUMP_VFC,
+ DBG_GRC_PARAM_DUMP_CM_CTX,
+ DBG_GRC_PARAM_DUMP_PXP,
+ DBG_GRC_PARAM_DUMP_RSS,
+ DBG_GRC_PARAM_DUMP_CAU,
+ DBG_GRC_PARAM_DUMP_QM,
+ DBG_GRC_PARAM_DUMP_MCP,
+ DBG_GRC_PARAM_RESERVED,
+ DBG_GRC_PARAM_DUMP_CFC,
+ DBG_GRC_PARAM_DUMP_IGU,
+ DBG_GRC_PARAM_DUMP_BRB,
+ DBG_GRC_PARAM_DUMP_BTB,
+ DBG_GRC_PARAM_DUMP_BMB,
+ DBG_GRC_PARAM_DUMP_NIG,
+ DBG_GRC_PARAM_DUMP_MULD,
+ DBG_GRC_PARAM_DUMP_PRS,
+ DBG_GRC_PARAM_DUMP_DMAE,
+ DBG_GRC_PARAM_DUMP_TM,
+ DBG_GRC_PARAM_DUMP_SDM,
+ DBG_GRC_PARAM_DUMP_DIF,
+ DBG_GRC_PARAM_DUMP_STATIC,
+ DBG_GRC_PARAM_UNSTALL,
+ DBG_GRC_PARAM_NUM_LCIDS,
+ DBG_GRC_PARAM_NUM_LTIDS,
DBG_GRC_PARAM_EXCLUDE_ALL,
- /* preset: include memories for crash dump (1 only) */
DBG_GRC_PARAM_CRASH,
- /* perform dump only if MFW is responding (0/1) */
DBG_GRC_PARAM_PARITY_SAFE,
- DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
- DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CM,
+ DBG_GRC_PARAM_DUMP_PHY,
DBG_GRC_PARAM_NO_MCP,
DBG_GRC_PARAM_NO_FW_VER,
MAX_DBG_GRC_PARAMS
@@ -2347,7 +2359,10 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
+ DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_DBG_BUS_IN_USE,
MAX_DBG_STATUS
};
@@ -2364,25 +2379,22 @@ enum dbg_storms {
/* Idle Check data */
struct idle_chk_data {
- __le32 buf_size; /* Idle check buffer size in dwords */
- u8 buf_size_set; /* Indicates if the idle check buffer size was set
- * (0/1).
- */
+ __le32 buf_size;
+ u8 buf_size_set;
u8 reserved1;
__le16 reserved2;
};
/* Debug Tools data (per HW function) */
struct dbg_tools_data {
- struct dbg_grc_data grc; /* GRC Dump data */
- struct dbg_bus_data bus; /* Debug Bus data */
- struct idle_chk_data idle_chk; /* Idle Check data */
- u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
- u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
- */
- u8 chip_id; /* Chip ID (from enum chip_ids) */
- u8 platform_id; /* Platform ID (from enum platform_ids) */
- u8 initialized; /* Indicates if the data was initialized */
+ struct dbg_grc_data grc;
+ struct dbg_bus_data bus;
+ struct idle_chk_data idle_chk;
+ u8 mode_enable[40];
+ u8 block_in_reset[88];
+ u8 chip_id;
+ u8 platform_id;
+ u8 initialized;
u8 reserved;
};
@@ -2464,6 +2476,12 @@ struct init_qm_vport_params {
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ CHIP_RESERVED,
+ MAX_CHIP_IDS
+};
struct fw_asserts_ram_section {
__le16 section_ram_line_offset;
@@ -2475,18 +2493,18 @@ struct fw_asserts_ram_section {
};
struct fw_ver_num {
- u8 major; /* Firmware major version number */
- u8 minor; /* Firmware minor version number */
- u8 rev; /* Firmware revision version number */
- u8 eng; /* Firmware engineering version number (for bootleg versions) */
+ u8 major;
+ u8 minor;
+ u8 rev;
+ u8 eng;
};
struct fw_ver_info {
- __le16 tools_ver; /* Tools version number */
- u8 image_id; /* FW image ID (e.g. main) */
+ __le16 tools_ver;
+ u8 image_id;
u8 reserved1;
- struct fw_ver_num num; /* FW version number */
- __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */
+ struct fw_ver_num num;
+ __le32 timestamp;
__le32 reserved2;
};
@@ -2722,7 +2740,6 @@ struct init_read_op {
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val;
-
};
/* Init operations union */
@@ -2782,6 +2799,7 @@ struct iro {
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+
/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
@@ -2805,6 +2823,7 @@ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
*
@@ -2824,6 +2843,7 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
* for idle check results.
@@ -2840,6 +2860,7 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
* into the specified buffer.
@@ -2860,6 +2881,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
* for mcp trace results.
@@ -2878,6 +2900,7 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
* into the specified buffer.
@@ -2902,6 +2925,7 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
* for grc trace fifo results.
@@ -2917,6 +2941,7 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
* the specified buffer.
@@ -2938,6 +2963,7 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
* for the IGU fifo results.
@@ -2954,6 +2980,7 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
+
/**
* @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
* the specified buffer.
@@ -2975,6 +3002,7 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
+
/**
* @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
* buffer size for protection override window results.
@@ -3074,6 +3102,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+
/**
* @brief qed_dbg_get_status_str - Returns a string for the specified status.
*
@@ -3082,6 +3111,7 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
* @return a string for the specified status
*/
const char *qed_dbg_get_status_str(enum dbg_status status);
+
/**
* @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
* for idle check results (in bytes).
@@ -3116,6 +3146,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
char *results_buf,
u32 *num_errors,
u32 *num_warnings);
+
/**
* @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
* for MCP Trace results (in bytes).
@@ -3132,6 +3163,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_mcp_trace_results - Prints MCP Trace results
*
@@ -3146,6 +3178,7 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
* for reg_fifo results (in bytes).
@@ -3162,6 +3195,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_reg_fifo_results - Prints reg fifo results
*
@@ -3176,6 +3210,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
* for igu_fifo results (in bytes).
@@ -3192,6 +3227,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_igu_fifo_results - Prints IGU fifo results
*
@@ -3206,6 +3242,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_protection_override_results_buf_size - Returns the required
* buffer size for protection override results (in bytes).
@@ -3223,6 +3260,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_protection_override_results - Prints protection override
* results.
@@ -3238,6 +3276,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
/**
* @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
* for FW Asserts results (in bytes).
@@ -3254,6 +3293,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+
/**
* @brief qed_print_fw_asserts_results - Prints FW Asserts results
*
@@ -3268,6 +3308,269 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
char *results_buf);
+
+/* Debug Bus blocks */
+static const u32 dbg_bus_blocks[] = {
+ 0x0000000f, /* grc, bb, 15 lines */
+ 0x0000000f, /* grc, k2, 15 lines */
+ 0x00000000,
+ 0x00000000, /* miscs, bb, 0 lines */
+ 0x00000000, /* miscs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* misc, bb, 0 lines */
+ 0x00000000, /* misc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* dbu, bb, 0 lines */
+ 0x00000000, /* dbu, k2, 0 lines */
+ 0x00000000,
+ 0x000f0127, /* pglue_b, bb, 39 lines */
+ 0x0036012a, /* pglue_b, k2, 42 lines */
+ 0x00000000,
+ 0x00000000, /* cnig, bb, 0 lines */
+ 0x00120102, /* cnig, k2, 2 lines */
+ 0x00000000,
+ 0x00000000, /* cpmu, bb, 0 lines */
+ 0x00000000, /* cpmu, k2, 0 lines */
+ 0x00000000,
+ 0x00000001, /* ncsi, bb, 1 lines */
+ 0x00000001, /* ncsi, k2, 1 lines */
+ 0x00000000,
+ 0x00000000, /* opte, bb, 0 lines */
+ 0x00000000, /* opte, k2, 0 lines */
+ 0x00000000,
+ 0x00600085, /* bmb, bb, 133 lines */
+ 0x00600085, /* bmb, k2, 133 lines */
+ 0x00000000,
+ 0x00000000, /* pcie, bb, 0 lines */
+ 0x00e50033, /* pcie, k2, 51 lines */
+ 0x00000000,
+ 0x00000000, /* mcp, bb, 0 lines */
+ 0x00000000, /* mcp, k2, 0 lines */
+ 0x00000000,
+ 0x01180009, /* mcp2, bb, 9 lines */
+ 0x01180009, /* mcp2, k2, 9 lines */
+ 0x00000000,
+ 0x01210104, /* pswhst, bb, 4 lines */
+ 0x01210104, /* pswhst, k2, 4 lines */
+ 0x00000000,
+ 0x01250103, /* pswhst2, bb, 3 lines */
+ 0x01250103, /* pswhst2, k2, 3 lines */
+ 0x00000000,
+ 0x00340101, /* pswrd, bb, 1 lines */
+ 0x00340101, /* pswrd, k2, 1 lines */
+ 0x00000000,
+ 0x01280119, /* pswrd2, bb, 25 lines */
+ 0x01280119, /* pswrd2, k2, 25 lines */
+ 0x00000000,
+ 0x01410109, /* pswwr, bb, 9 lines */
+ 0x01410109, /* pswwr, k2, 9 lines */
+ 0x00000000,
+ 0x00000000, /* pswwr2, bb, 0 lines */
+ 0x00000000, /* pswwr2, k2, 0 lines */
+ 0x00000000,
+ 0x001c0001, /* pswrq, bb, 1 lines */
+ 0x001c0001, /* pswrq, k2, 1 lines */
+ 0x00000000,
+ 0x014a0015, /* pswrq2, bb, 21 lines */
+ 0x014a0015, /* pswrq2, k2, 21 lines */
+ 0x00000000,
+ 0x00000000, /* pglcs, bb, 0 lines */
+ 0x00120006, /* pglcs, k2, 6 lines */
+ 0x00000000,
+ 0x00100001, /* dmae, bb, 1 lines */
+ 0x00100001, /* dmae, k2, 1 lines */
+ 0x00000000,
+ 0x015f0105, /* ptu, bb, 5 lines */
+ 0x015f0105, /* ptu, k2, 5 lines */
+ 0x00000000,
+ 0x01640120, /* tcm, bb, 32 lines */
+ 0x01640120, /* tcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* mcm, bb, 32 lines */
+ 0x01640120, /* mcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* ucm, bb, 32 lines */
+ 0x01640120, /* ucm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* xcm, bb, 32 lines */
+ 0x01640120, /* xcm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* ycm, bb, 32 lines */
+ 0x01640120, /* ycm, k2, 32 lines */
+ 0x00000000,
+ 0x01640120, /* pcm, bb, 32 lines */
+ 0x01640120, /* pcm, k2, 32 lines */
+ 0x00000000,
+ 0x01840062, /* qm, bb, 98 lines */
+ 0x01840062, /* qm, k2, 98 lines */
+ 0x00000000,
+ 0x01e60021, /* tm, bb, 33 lines */
+ 0x01e60021, /* tm, k2, 33 lines */
+ 0x00000000,
+ 0x02070107, /* dorq, bb, 7 lines */
+ 0x02070107, /* dorq, k2, 7 lines */
+ 0x00000000,
+ 0x00600185, /* brb, bb, 133 lines */
+ 0x00600185, /* brb, k2, 133 lines */
+ 0x00000000,
+ 0x020e0019, /* src, bb, 25 lines */
+ 0x020c001a, /* src, k2, 26 lines */
+ 0x00000000,
+ 0x02270104, /* prs, bb, 4 lines */
+ 0x02270104, /* prs, k2, 4 lines */
+ 0x00000000,
+ 0x022b0133, /* tsdm, bb, 51 lines */
+ 0x022b0133, /* tsdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* msdm, bb, 51 lines */
+ 0x022b0133, /* msdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* usdm, bb, 51 lines */
+ 0x022b0133, /* usdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* xsdm, bb, 51 lines */
+ 0x022b0133, /* xsdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* ysdm, bb, 51 lines */
+ 0x022b0133, /* ysdm, k2, 51 lines */
+ 0x00000000,
+ 0x022b0133, /* psdm, bb, 51 lines */
+ 0x022b0133, /* psdm, k2, 51 lines */
+ 0x00000000,
+ 0x025e010c, /* tsem, bb, 12 lines */
+ 0x025e010c, /* tsem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* msem, bb, 12 lines */
+ 0x025e010c, /* msem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* usem, bb, 12 lines */
+ 0x025e010c, /* usem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* xsem, bb, 12 lines */
+ 0x025e010c, /* xsem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* ysem, bb, 12 lines */
+ 0x025e010c, /* ysem, k2, 12 lines */
+ 0x00000000,
+ 0x025e010c, /* psem, bb, 12 lines */
+ 0x025e010c, /* psem, k2, 12 lines */
+ 0x00000000,
+ 0x026a000d, /* rss, bb, 13 lines */
+ 0x026a000d, /* rss, k2, 13 lines */
+ 0x00000000,
+ 0x02770106, /* tmld, bb, 6 lines */
+ 0x02770106, /* tmld, k2, 6 lines */
+ 0x00000000,
+ 0x027d0106, /* muld, bb, 6 lines */
+ 0x027d0106, /* muld, k2, 6 lines */
+ 0x00000000,
+ 0x02770005, /* yuld, bb, 5 lines */
+ 0x02770005, /* yuld, k2, 5 lines */
+ 0x00000000,
+ 0x02830107, /* xyld, bb, 7 lines */
+ 0x027d0107, /* xyld, k2, 7 lines */
+ 0x00000000,
+ 0x00000000, /* ptld, bb, 0 lines */
+ 0x00000000, /* ptld, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* ypld, bb, 0 lines */
+ 0x00000000, /* ypld, k2, 0 lines */
+ 0x00000000,
+ 0x028a010e, /* prm, bb, 14 lines */
+ 0x02980110, /* prm, k2, 16 lines */
+ 0x00000000,
+ 0x02a8000d, /* pbf_pb1, bb, 13 lines */
+ 0x02a8000d, /* pbf_pb1, k2, 13 lines */
+ 0x00000000,
+ 0x02a8000d, /* pbf_pb2, bb, 13 lines */
+ 0x02a8000d, /* pbf_pb2, k2, 13 lines */
+ 0x00000000,
+ 0x02a8000d, /* rpb, bb, 13 lines */
+ 0x02a8000d, /* rpb, k2, 13 lines */
+ 0x00000000,
+ 0x00600185, /* btb, bb, 133 lines */
+ 0x00600185, /* btb, k2, 133 lines */
+ 0x00000000,
+ 0x02b50117, /* pbf, bb, 23 lines */
+ 0x02b50117, /* pbf, k2, 23 lines */
+ 0x00000000,
+ 0x02cc0006, /* rdif, bb, 6 lines */
+ 0x02cc0006, /* rdif, k2, 6 lines */
+ 0x00000000,
+ 0x02d20006, /* tdif, bb, 6 lines */
+ 0x02d20006, /* tdif, k2, 6 lines */
+ 0x00000000,
+ 0x02d80003, /* cdu, bb, 3 lines */
+ 0x02db000e, /* cdu, k2, 14 lines */
+ 0x00000000,
+ 0x02e9010d, /* ccfc, bb, 13 lines */
+ 0x02f60117, /* ccfc, k2, 23 lines */
+ 0x00000000,
+ 0x02e9010d, /* tcfc, bb, 13 lines */
+ 0x02f60117, /* tcfc, k2, 23 lines */
+ 0x00000000,
+ 0x030d0133, /* igu, bb, 51 lines */
+ 0x030d0133, /* igu, k2, 51 lines */
+ 0x00000000,
+ 0x03400106, /* cau, bb, 6 lines */
+ 0x03400106, /* cau, k2, 6 lines */
+ 0x00000000,
+ 0x00000000, /* rgfs, bb, 0 lines */
+ 0x00000000, /* rgfs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* rgsrc, bb, 0 lines */
+ 0x00000000, /* rgsrc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* tgfs, bb, 0 lines */
+ 0x00000000, /* tgfs, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* tgsrc, bb, 0 lines */
+ 0x00000000, /* tgsrc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* umac, bb, 0 lines */
+ 0x00120006, /* umac, k2, 6 lines */
+ 0x00000000,
+ 0x00000000, /* xmac, bb, 0 lines */
+ 0x00000000, /* xmac, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* dbg, bb, 0 lines */
+ 0x00000000, /* dbg, k2, 0 lines */
+ 0x00000000,
+ 0x0346012b, /* nig, bb, 43 lines */
+ 0x0346011d, /* nig, k2, 29 lines */
+ 0x00000000,
+ 0x00000000, /* wol, bb, 0 lines */
+ 0x001c0002, /* wol, k2, 2 lines */
+ 0x00000000,
+ 0x00000000, /* bmbn, bb, 0 lines */
+ 0x00210008, /* bmbn, k2, 8 lines */
+ 0x00000000,
+ 0x00000000, /* ipc, bb, 0 lines */
+ 0x00000000, /* ipc, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* nwm, bb, 0 lines */
+ 0x0371000b, /* nwm, k2, 11 lines */
+ 0x00000000,
+ 0x00000000, /* nws, bb, 0 lines */
+ 0x037c0009, /* nws, k2, 9 lines */
+ 0x00000000,
+ 0x00000000, /* ms, bb, 0 lines */
+ 0x00120004, /* ms, k2, 4 lines */
+ 0x00000000,
+ 0x00000000, /* phy_pcie, bb, 0 lines */
+ 0x00e5001a, /* phy_pcie, k2, 26 lines */
+ 0x00000000,
+ 0x00000000, /* led, bb, 0 lines */
+ 0x00000000, /* led, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* avs_wrap, bb, 0 lines */
+ 0x00000000, /* avs_wrap, k2, 0 lines */
+ 0x00000000,
+ 0x00000000, /* bar0_map, bb, 0 lines */
+ 0x00000000, /* bar0_map, k2, 0 lines */
+ 0x00000000,
+};
+
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3589,37 +3892,37 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
(IRO[44].base + ((pf_id) * IRO[44].m1))
-static const struct iro iro_arr[47] = {
+static const struct iro iro_arr[49] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb0, 0x80, 0x0, 0x0, 0x80},
- {0x6318, 0x20, 0x0, 0x0, 0x20},
+ {0x6518, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
{0x0, 0x8, 0x0, 0x0, 0x2},
{0x80, 0x8, 0x0, 0x0, 0x4},
{0x84, 0x8, 0x0, 0x0, 0x2},
- {0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ {0x4c40, 0x0, 0x0, 0x0, 0x78},
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4c38, 0x0, 0x0, 0x0, 0x78},
{0x4990, 0x0, 0x0, 0x0, 0x78},
- {0x7e48, 0x0, 0x0, 0x0, 0x78},
+ {0x7f48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x60f8, 0x10, 0x0, 0x0, 0x10},
- {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x61f8, 0x10, 0x0, 0x0, 0x10},
+ {0xbd20, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
{0x4b60, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0x53a0, 0x80, 0x4, 0x0, 0x4},
- {0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ {0xc7c8, 0x0, 0x0, 0x0, 0x4},
{0x4ba0, 0x80, 0x0, 0x0, 0x20},
- {0x8050, 0x40, 0x0, 0x0, 0x30},
- {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x8150, 0x40, 0x0, 0x0, 0x30},
+ {0xec70, 0x60, 0x0, 0x0, 0x60},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xf188, 0x78, 0x0, 0x0, 0x78},
+ {0xf1b0, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
- {0xacf0, 0x0, 0x0, 0x0, 0xf0},
- {0xade0, 0x8, 0x0, 0x0, 0x8},
+ {0xaef8, 0x0, 0x0, 0x0, 0xf0},
+ {0xafe8, 0x8, 0x0, 0x0, 0x8},
{0x1f8, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
@@ -3627,16 +3930,18 @@ static const struct iro iro_arr[47] = {
{0x0, 0x8, 0x0, 0x0, 0x8},
{0x200, 0x10, 0x8, 0x0, 0x8},
{0xb78, 0x10, 0x8, 0x0, 0x2},
- {0xd888, 0x38, 0x0, 0x0, 0x24},
- {0x12c38, 0x10, 0x0, 0x0, 0x8},
- {0x11aa0, 0x38, 0x0, 0x0, 0x18},
- {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ {0xd9a8, 0x38, 0x0, 0x0, 0x24},
+ {0x12988, 0x10, 0x0, 0x0, 0x8},
+ {0x11fa0, 0x38, 0x0, 0x0, 0x18},
+ {0xa580, 0x38, 0x0, 0x0, 0x10},
{0x86f8, 0x30, 0x0, 0x0, 0x18},
{0x101f8, 0x10, 0x0, 0x0, 0x10},
- {0xdd08, 0x48, 0x0, 0x0, 0x38},
+ {0xde28, 0x48, 0x0, 0x0, 0x38},
{0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x5020, 0x10, 0x0, 0x0, 0x10},
+ {0xc9b0, 0x30, 0x0, 0x0, 0x10},
+ {0xeec0, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
@@ -3724,361 +4029,359 @@ static const struct iro iro_arr[47] = {
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_OFFSET 30855
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30817
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_RLPFENABLE_RT_OFFSET 30871
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30851
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_WFQPFCRD_RT_OFFSET 30905
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_OFFSET 31675
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_OFFSET 32699
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_OFFSET 33211
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
-
-#define RUNTIME_ARRAY_SIZE 33927
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
+
+#define RUNTIME_ARRAY_SIZE 34309
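
A hedged sketch of how these runtime offsets are consumed (illustrative, not the driver's literal code): init code stages values into a host-side array of RUNTIME_ARRAY_SIZE u32 slots, keyed by the *_RT_OFFSET indices above, and the populated ranges are later written to the chip in one pass.

static u32 rt_init_val[RUNTIME_ARRAY_SIZE];	/* staged register values */
static bool rt_b_valid[RUNTIME_ARRAY_SIZE];	/* which slots were set */

static void rt_store(u32 rt_offset, u32 val)
{
	rt_init_val[rt_offset] = val;
	rt_b_valid[rt_offset] = true;	/* flushed to HW during init */
}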
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -4307,7 +4610,7 @@ struct xstorm_eth_conn_ag_ctx {
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -4340,7 +4643,7 @@ struct xstorm_eth_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 ereserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -4627,6 +4930,7 @@ enum eth_error_code {
ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
ETH_FILTERS_VNI_ADD_FAIL_FULL,
ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL,
MAX_ETH_ERROR_CODE
};
@@ -4879,6 +5183,39 @@ enum gft_logic_filter_type {
MAX_GFT_LOGIC_FILTER_TYPE
};
+struct rx_add_openflow_filter_data {
+ __le16 action_icid;
+ u8 priority;
+ u8 reserved0;
+ __le32 tenant_id;
+ __le16 dst_mac_hi;
+ __le16 dst_mac_mid;
+ __le16 dst_mac_lo;
+ __le16 src_mac_hi;
+ __le16 src_mac_mid;
+ __le16 src_mac_lo;
+ __le16 vlan_id;
+ __le16 l2_eth_type;
+ u8 ipv4_dscp;
+ u8 ipv4_frag_type;
+ u8 ipv4_over_ip;
+ u8 tenant_id_exists;
+ __le32 ipv4_dst_addr;
+ __le32 ipv4_src_addr;
+ __le16 l4_dst_port;
+ __le16 l4_src_port;
+};
+
+struct rx_create_gft_action_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+struct rx_create_openflow_action_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
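One packing detail worth noting: the MAC addresses in rx_add_openflow_filter_data are split into three __le16 words. A minimal helper, assuming the conventional big-endian word split (an assumption; the interface does not spell the byte order out here):

static void mac_to_fw_words(const u8 *mac,
			    __le16 *hi, __le16 *mid, __le16 *lo)
{
	/* mac[0..1] -> hi, mac[2..3] -> mid, mac[4..5] -> lo (assumed) */
	*hi  = cpu_to_le16((mac[0] << 8) | mac[1]);
	*mid = cpu_to_le16((mac[2] << 8) | mac[3]);
	*lo  = cpu_to_le16((mac[4] << 8) | mac[5]);
}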
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -4956,7 +5293,7 @@ struct rx_update_gft_filter_data {
u8 vport_id;
u8 filter_type;
u8 filter_action;
- u8 reserved;
+ u8 assert_on_error;
};
/* Ramrod data for rx queue start ramrod */
@@ -5102,203 +5439,6 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct gft_cam_line {
- __le32 camline;
-#define GFT_CAM_LINE_VALID_MASK 0x1
-#define GFT_CAM_LINE_VALID_SHIFT 0
-#define GFT_CAM_LINE_DATA_MASK 0x3FFF
-#define GFT_CAM_LINE_DATA_SHIFT 1
-#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
-#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
-#define GFT_CAM_LINE_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_RESERVED1_SHIFT 29
-};
-
-struct gft_cam_line_mapped {
- __le32 camline;
-#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
-#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
-#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
-#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
-#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
-};
-
-union gft_cam_line_union {
- struct gft_cam_line cam_line;
- struct gft_cam_line_mapped cam_line_mapped;
-};
-
-enum gft_profile_ip_version {
- GFT_PROFILE_IPV4 = 0,
- GFT_PROFILE_IPV6 = 1,
- MAX_GFT_PROFILE_IP_VERSION
-};
-
-enum gft_profile_upper_protocol_type {
- GFT_PROFILE_ROCE_PROTOCOL = 0,
- GFT_PROFILE_RROCE_PROTOCOL = 1,
- GFT_PROFILE_FCOE_PROTOCOL = 2,
- GFT_PROFILE_ICMP_PROTOCOL = 3,
- GFT_PROFILE_ARP_PROTOCOL = 4,
- GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
- GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
- GFT_PROFILE_TCP_PROTOCOL = 7,
- GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
- GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
- GFT_PROFILE_UDP_PROTOCOL = 10,
- GFT_PROFILE_USER_IP_1_INNER = 11,
- GFT_PROFILE_USER_IP_2_OUTER = 12,
- GFT_PROFILE_USER_ETH_1_INNER = 13,
- GFT_PROFILE_USER_ETH_2_OUTER = 14,
- GFT_PROFILE_RAW = 15,
- MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
-};
-
-struct gft_ram_line {
- __le32 low32bits;
-#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
-#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
-#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
-#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
-#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
-#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
-#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
-#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
-#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
-#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
-#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
-#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
-#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
-#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
-#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
-#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
-#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
-#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
-#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
-#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
-#define GFT_RAM_LINE_TTL_MASK 0x1
-#define GFT_RAM_LINE_TTL_SHIFT 18
-#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
-#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
-#define GFT_RAM_LINE_RESERVED0_MASK 0x1
-#define GFT_RAM_LINE_RESERVED0_SHIFT 20
-#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
-#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
-#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
-#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
-#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
-#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
-#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
-#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
-#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
-#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
-#define GFT_RAM_LINE_DST_PORT_MASK 0x1
-#define GFT_RAM_LINE_DST_PORT_SHIFT 30
-#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
-#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
- __le32 high32bits;
-#define GFT_RAM_LINE_DSCP_MASK 0x1
-#define GFT_RAM_LINE_DSCP_SHIFT 0
-#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
-#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
-#define GFT_RAM_LINE_DST_IP_MASK 0x1
-#define GFT_RAM_LINE_DST_IP_SHIFT 2
-#define GFT_RAM_LINE_SRC_IP_MASK 0x1
-#define GFT_RAM_LINE_SRC_IP_SHIFT 3
-#define GFT_RAM_LINE_PRIORITY_MASK 0x1
-#define GFT_RAM_LINE_PRIORITY_SHIFT 4
-#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
-#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
-#define GFT_RAM_LINE_VLAN_MASK 0x1
-#define GFT_RAM_LINE_VLAN_SHIFT 6
-#define GFT_RAM_LINE_DST_MAC_MASK 0x1
-#define GFT_RAM_LINE_DST_MAC_SHIFT 7
-#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
-#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
-#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
-#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
-#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
-#define GFT_RAM_LINE_RESERVED1_SHIFT 10
-};
-
-struct mstorm_eth_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0;
- __le16 word1;
- __le32 reg0;
- __le32 reg1;
-};
-
struct xstorm_eth_conn_agctxdq_ext_ldpart {
u8 reserved0;
u8 eth_state;
@@ -5511,7 +5651,7 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5528,6 +5668,43 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
__le32 reg4;
};
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
struct xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
u8 eth_state;
@@ -5740,7 +5917,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 quota;
+ __le16 ereserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5748,6 +5925,200 @@ struct xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
+struct gft_cam_line {
+ __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+struct gft_cam_line_mapped {
+ __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+struct gft_profile_key {
+ __le16 profile_key;
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+enum gft_profile_tunnel_type {
+ GFT_PROFILE_NO_TUNNEL = 0,
+ GFT_PROFILE_VXLAN_TUNNEL = 1,
+ GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+ GFT_PROFILE_GRE_IP_TUNNEL = 3,
+ GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+ GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+ MAX_GFT_PROFILE_TUNNEL_TYPE
+};
+
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
+struct gft_ram_line {
+ __le32 lo;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 hi;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+enum gft_vlan_select {
+ INNER_PROVIDER_VLAN = 0,
+ INNER_VLAN = 1,
+ OUTER_PROVIDER_VLAN = 2,
+ OUTER_VLAN = 3,
+ MAX_GFT_VLAN_SELECT
+};
+
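The MASK/SHIFT pairs in gft_ram_line are meant to be driven through the generic bitfield helpers. A sketch of enabling 5-tuple matching, with SET_FIELD reproduced from memory — treat its exact definition as an assumption; qed's common HSI header provides the real one:

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
		(value) |= (((u64)(flag)) << (name ## _SHIFT));		\
	} while (0)

static void gft_ram_line_5tuple(struct gft_ram_line *line)
{
	u32 lo = 0, hi = 0;

	SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);	 /* L4 dst port */
	SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1);	 /* L4 src port */
	SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);		 /* IP dst addr */
	SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);		 /* IP src addr */
	SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); /* L4 protocol */

	line->lo = cpu_to_le32(lo);
	line->hi = cpu_to_le32(hi);
}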
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
@@ -5827,12 +6198,9 @@ struct rdma_init_func_hdr {
u8 cnq_start_offset;
u8 num_cnqs;
u8 cq_ring_mode;
- u8 cnp_vlan_priority;
- __le32 cnp_send_timeout;
- u8 cnp_dscp;
u8 vf_id;
u8 vf_valid;
- u8 reserved[5];
+ u8 reserved[3];
};
struct rdma_init_func_ramrod_data {
@@ -5856,54 +6224,55 @@ enum rdma_ramrod_cmd_id {
};
struct rdma_register_tid_ramrod_data {
- __le32 flags;
-#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF
-#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18
-#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30
-#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31
+ __le16 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
u8 flags1;
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
u8 key;
u8 length_hi;
u8 vf_id;
u8 vf_valid;
__le16 pd;
+ __le16 reserved2;
__le32 length_lo;
__le32 itid;
- __le32 reserved2;
+ __le32 reserved3;
struct regpair va;
struct regpair pbl_base;
struct regpair dif_error_addr;
struct regpair dif_runt_addr;
- __le32 reserved3[2];
+ __le32 reserved4[2];
};
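The flags container shrinks from 32 to 16 bits in this hunk (MAX_ID is dropped and the remaining shifts are renumbered from 0), so callers must build a u16 now. A sketch under that assumption, with SET_FIELD() again taken from the qed common headers and the enabled access rights purely illustrative:

/* Sketch: populate the reworked 16-bit flags of a register-TID ramrod. */
static void rdma_fill_tid_flags(struct rdma_register_tid_ramrod_data *p,
				u8 page_size_log, bool two_level_pbl)
{
	u16 flags = 0;

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
		  page_size_log);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  two_level_pbl ? 1 : 0);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, 1);
	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, 1);

	p->flags = cpu_to_le16(flags);	/* firmware fields are little-endian */
}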
struct rdma_resize_cq_output_params {
@@ -6149,6 +6518,233 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+};
+
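These aggregative-context flag bytes all follow one packing scheme: 1-bit event flags, 2-bit CF (completion-flag) state fields, then 1-bit enables. A hypothetical reader for the bit this revision renames from BIT13 to MSTORM_FLUSH, assuming the usual GET_FIELD() helper:

/* Hypothetical helper: test MSTORM_FLUSH (bit 5 of flags1, ex-BIT13). */
static bool dq_ext_ld_mstorm_flush_set(
	const struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part *p)
{
	return GET_FIELD(p->flags1,
			 XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH);
}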
struct mstorm_rdma_conn_ag_ctx {
u8 byte0;
u8 byte1;
@@ -6438,233 +7034,6 @@ struct ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
- u8 reserved0;
- u8 state;
- u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
- u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
- u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
- u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
- u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
- u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
- u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
- u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
- u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
- u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
- u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
- u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
- u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
- u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
- u8 byte2;
- __le16 physical_q0;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le16 word4;
- __le16 word5;
- __le16 conn_dpi;
- u8 byte3;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 snd_nxt_psn;
- __le32 reg4;
-};
-
struct xstorm_rdma_conn_ag_ctx {
u8 reserved0;
u8 state;
@@ -6696,8 +7065,8 @@ struct xstorm_rdma_conn_ag_ctx {
#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
@@ -7093,16 +7462,35 @@ struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+struct roce_events_stats {
+ __le16 silent_drops;
+ __le16 rnr_naks_sent;
+ __le32 retransmit_count;
+ __le32 icrc_error_count;
+ __le32 reserved;
+};
+
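Every multi-byte field in these firmware structures is little-endian on the wire, so consumers convert on read. A minimal sketch; the helper and its pr_info() formatting are illustrative, not part of the driver:

/* Sketch: dump the new RoCE event counters (kernel context assumed for
 * le16_to_cpu()/le32_to_cpu() and pr_info()).
 */
static void roce_dump_event_stats(const struct roce_events_stats *s)
{
	pr_info("silent_drops=%u rnr_naks=%u retransmits=%u icrc_errors=%u\n",
		le16_to_cpu(s->silent_drops),
		le16_to_cpu(s->rnr_naks_sent),
		le32_to_cpu(s->retransmit_count),
		le32_to_cpu(s->icrc_error_count));
}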
enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP,
ROCE_EVENT_QUERY_QP,
ROCE_EVENT_DESTROY_QP,
+ ROCE_EVENT_CREATE_UD_QP,
+ ROCE_EVENT_DESTROY_UD_QP,
MAX_ROCE_EVENT_OPCODE
};
+struct roce_init_func_params {
+ u8 ll2_queue_id;
+ u8 cnp_vlan_priority;
+ u8 cnp_dscp;
+ u8 reserved;
+ __le32 cnp_send_timeout;
+};
+
struct roce_init_func_ramrod_data {
struct rdma_init_func_ramrod_data rdma;
+ struct roce_init_func_params roce;
};
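Note the CNP fields deleted from rdma_init_func_hdr earlier in this patch reappear here, scoped to RoCE. A sketch of filling them in; the priority, DSCP and timeout values below are invented defaults, not taken from the driver:

/* Sketch: hand the relocated congestion-notification parameters to the
 * init-function ramrod.  All values are illustrative.
 */
static void roce_fill_init_func(struct roce_init_func_ramrod_data *p)
{
	p->roce.ll2_queue_id = 0;
	p->roce.cnp_vlan_priority = 7;
	p->roce.cnp_dscp = 48;
	p->roce.cnp_send_timeout = cpu_to_le32(0x1000);
}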
struct roce_modify_qp_req_ramrod_data {
@@ -7222,6 +7610,8 @@ enum roce_ramrod_cmd_id {
ROCE_RAMROD_MODIFY_QP,
ROCE_RAMROD_QUERY_QP,
ROCE_RAMROD_DESTROY_QP,
+ ROCE_RAMROD_CREATE_UD_QP,
+ ROCE_RAMROD_DESTROY_UD_QP,
MAX_ROCE_RAMROD_CMD_ID
};
@@ -7299,13 +7689,6 @@ struct mstorm_roce_resp_conn_ag_ctx {
__le32 reg1;
};
-enum roce_flavor {
- PLAIN_ROCE /* RoCE v1 */ ,
- RROCE_IPV4 /* RoCE v2 (Routable RoCE) over ipv4 */ ,
- RROCE_IPV6 /* RoCE v2 (Routable RoCE) over ipv6 */ ,
- MAX_ROCE_FLAVOR
-};
-
struct tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
@@ -7416,8 +7799,8 @@ struct tstorm_roce_resp_conn_ag_ctx {
u8 flags0;
#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
@@ -8097,7 +8480,7 @@ struct xstorm_roce_resp_conn_ag_ctx {
__le16 irq_prod;
__le16 word3;
__le16 word4;
- __le16 word5;
+ __le16 ereserved1;
__le16 irq_cons;
u8 rxmit_opcode;
u8 byte4;
@@ -8200,6 +8583,812 @@ struct ystorm_roce_resp_conn_ag_ctx {
__le32 reg3;
};
+enum roce_flavor {
+	PLAIN_ROCE, /* RoCE v1 */
+	RROCE_IPV4, /* RoCE v2 (Routable RoCE) over ipv4 */
+	RROCE_IPV6, /* RoCE v2 (Routable RoCE) over ipv6 */
+ MAX_ROCE_FLAVOR
+};
+
+struct ystorm_iwarp_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+struct pstorm_iwarp_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+struct xstorm_iwarp_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct xstorm_iwarp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+ u8 flags2;
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 sq_comp_cons;
+ __le16 sq_tx_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 reg4;
+ __le32 rewinded_snd_max;
+ __le32 rd_msn;
+ __le16 irq_prod_via_msdm;
+ __le16 irq_cons;
+ __le16 hq_cons_th_or_mpa_data;
+ __le16 hq_cons;
+ __le32 atom_msn;
+ __le32 orq_cons;
+ __le32 orq_cons_th;
+ u8 byte7;
+ u8 max_ord;
+ u8 wqe_data_pad_bytes;
+ u8 former_hq_prod;
+ u8 irq_prod_via_msem;
+ u8 byte12;
+ u8 max_pkt_pdu_size_lo;
+ u8 max_pkt_pdu_size_hi;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 e5_reserved4;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 shared_queue_page_addr_lo;
+ __le32 shared_queue_page_addr_hi;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
+struct tstorm_iwarp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 unaligned_nxt_seq;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 orq_cache_idx;
+ u8 hq_prod;
+ __le16 sq_tx_cons_th;
+ u8 orq_prod;
+ u8 irq_cons;
+ __le16 sq_tx_cons;
+ __le16 conn_dpi;
+ __le16 rq_prod;
+ __le32 snd_seq;
+ __le32 last_hq_sequence;
+};
+
+struct tstorm_iwarp_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct mstorm_iwarp_conn_st_ctx {
+ __le32 reserved[32];
+};
+
+struct ustorm_iwarp_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+struct iwarp_conn_context {
+ struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
+ struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
+};
+
+struct iwarp_create_qp_ramrod_data {
+ u8 flags;
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
+ u8 reserved1;
+ __le16 pd;
+ __le16 sq_num_pages;
+ __le16 rq_num_pages;
+ __le32 reserved3[2];
+ struct regpair qp_handle_for_cqe;
+ struct rdma_srq_id srq_id;
+ __le32 cq_cid_for_sq;
+ __le32 cq_cid_for_rq;
+ __le16 dpi;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 reserved2[6];
+};
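A sketch of composing the create-QP flags byte; SET_FIELD() assumed as before, and which capabilities to enable is caller policy:

/* Sketch: enable basic RDMA capabilities on an iWARP create-QP ramrod. */
static void iwarp_fill_create_qp_flags(struct iwarp_create_qp_ramrod_data *p,
				       bool use_srq, bool atomics)
{
	u8 flags = 0;

	SET_FIELD(flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, 1);
	SET_FIELD(flags, IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, 1);
	SET_FIELD(flags, IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  atomics ? 1 : 0);
	SET_FIELD(flags, IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG,
		  use_srq ? 1 : 0);

	p->flags = flags;	/* single byte, no endian conversion needed */
}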
+
+enum iwarp_eqe_async_opcode {
+ IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
+ IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
+ IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE,
+ IWARP_EVENT_TYPE_ASYNC_CID_CLEANED,
+ IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
+ IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
+ IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
+ MAX_IWARP_EQE_ASYNC_OPCODE
+};
+
+struct iwarp_eqe_data_mpa_async_completion {
+ __le16 ulp_data_len;
+ u8 reserved[6];
+};
+
+struct iwarp_eqe_data_tcp_async_completion {
+ __le16 ulp_data_len;
+ u8 mpa_handshake_mode;
+ u8 reserved[5];
+};
+
+enum iwarp_eqe_sync_opcode {
+ IWARP_EVENT_TYPE_TCP_OFFLOAD = 11,
+ IWARP_EVENT_TYPE_MPA_OFFLOAD,
+ IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
+ IWARP_EVENT_TYPE_CREATE_QP,
+ IWARP_EVENT_TYPE_QUERY_QP,
+ IWARP_EVENT_TYPE_MODIFY_QP,
+ IWARP_EVENT_TYPE_DESTROY_QP,
+ MAX_IWARP_EQE_SYNC_OPCODE
+};
+
+enum iwarp_fw_return_code {
+ IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
+ IWARP_CONN_ERROR_TCP_CONNECTION_RST,
+ IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT,
+ IWARP_CONN_ERROR_MPA_ERROR_REJECT,
+ IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER,
+ IWARP_CONN_ERROR_MPA_RST,
+ IWARP_CONN_ERROR_MPA_FIN,
+ IWARP_CONN_ERROR_MPA_RTR_MISMATCH,
+ IWARP_CONN_ERROR_MPA_INSUF_IRD,
+ IWARP_CONN_ERROR_MPA_INVALID_PACKET,
+ IWARP_CONN_ERROR_MPA_LOCAL_ERROR,
+ IWARP_CONN_ERROR_MPA_TIMEOUT,
+ IWARP_CONN_ERROR_MPA_TERMINATE,
+ IWARP_QP_IN_ERROR_GOOD_CLOSE,
+ IWARP_QP_IN_ERROR_BAD_CLOSE,
+ IWARP_EXCEPTION_DETECTED_LLP_CLOSED,
+ IWARP_EXCEPTION_DETECTED_LLP_RESET,
+ IWARP_EXCEPTION_DETECTED_IRQ_FULL,
+ IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
+ IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
+ IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
+ IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
+ IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC,
+ IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR,
+ IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR,
+ IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED,
+ MAX_IWARP_FW_RETURN_CODE
+};
+
+struct iwarp_init_func_params {
+ u8 ll2_ooo_q_index;
+ u8 reserved1[7];
+};
+
+struct iwarp_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+ struct tcp_init_params tcp;
+ struct iwarp_init_func_params iwarp;
+};
+
+enum iwarp_modify_qp_new_state_type {
+ IWARP_MODIFY_QP_STATE_CLOSING = 1,
+ IWARP_MODIFY_QP_STATE_ERROR = 2,
+ MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
+};
+
+struct iwarp_modify_qp_ramrod_data {
+ __le16 transition_to_state;
+ __le16 flags;
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
+ __le32 reserved3[3];
+ __le32 reserved4[8];
+};
+
+struct mpa_rq_params {
+ __le32 ird;
+ __le32 ord;
+};
+
+struct mpa_ulp_buffer {
+ struct regpair addr;
+ __le16 len;
+ __le16 reserved[3];
+};
+
+struct mpa_outgoing_params {
+ u8 crc_needed;
+ u8 reject;
+ u8 reserved[6];
+ struct mpa_rq_params out_rq;
+ struct mpa_ulp_buffer outgoing_ulp_buffer;
+};
+
+struct iwarp_mpa_offload_ramrod_data {
+ struct mpa_outgoing_params common;
+ __le32 tcp_cid;
+ u8 mode;
+ u8 tcp_connect_side;
+ u8 rtr_pref;
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
+ u8 reserved2;
+ struct mpa_ulp_buffer incoming_ulp_buffer;
+ struct regpair async_eqe_output_buf;
+ struct regpair handle_for_async;
+ struct regpair shared_queue_addr;
+ u8 stats_counter_id;
+ u8 reserved3[15];
+};
+
+struct iwarp_offload_params {
+ struct mpa_ulp_buffer incoming_ulp_buffer;
+ struct regpair async_eqe_output_buf;
+ struct regpair handle_for_async;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 stats_counter_id;
+ u8 mpa_mode;
+ u8 reserved[10];
+};
+
+struct iwarp_query_qp_output_params {
+ __le32 flags;
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ u8 reserved1[4];
+};
+
+struct iwarp_query_qp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum iwarp_ramrod_cmd_id {
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
+ IWARP_RAMROD_CMD_ID_CREATE_QP,
+ IWARP_RAMROD_CMD_ID_QUERY_QP,
+ IWARP_RAMROD_CMD_ID_MODIFY_QP,
+ IWARP_RAMROD_CMD_ID_DESTROY_QP,
+ MAX_IWARP_RAMROD_CMD_ID
+};
+
+struct iwarp_rxmit_stats_drv {
+ struct regpair tx_go_to_slow_start_event_cnt;
+ struct regpair tx_fast_retransmit_event_cnt;
+};
+
+struct iwarp_tcp_offload_ramrod_data {
+ struct iwarp_offload_params iwarp;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+enum mpa_negotiation_mode {
+ MPA_NEGOTIATION_TYPE_BASIC = 1,
+ MPA_NEGOTIATION_TYPE_ENHANCED = 2,
+ MAX_MPA_NEGOTIATION_MODE
+};
+
+enum mpa_rtr_type {
+ MPA_RTR_TYPE_NONE = 0,
+ MPA_RTR_TYPE_ZERO_SEND = 1,
+ MPA_RTR_TYPE_ZERO_WRITE = 2,
+ MPA_RTR_TYPE_ZERO_SEND_AND_WRITE = 3,
+ MPA_RTR_TYPE_ZERO_READ = 4,
+ MPA_RTR_TYPE_ZERO_SEND_AND_READ = 5,
+ MPA_RTR_TYPE_ZERO_WRITE_AND_READ = 6,
+ MPA_RTR_TYPE_ZERO_SEND_AND_WRITE_AND_READ = 7,
+ MAX_MPA_RTR_TYPE
+};
+
+struct unaligned_opaque_data {
+ __le16 first_mpa_offset;
+ u8 tcp_payload_offset;
+ u8 flags;
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
+ __le32 cid;
+};
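A sketch of unpacking this descriptor on the unaligned-MPA receive path; endian conversion and GET_FIELD() as elsewhere, and the helper itself is illustrative:

/* Sketch: decode the opaque data attached to an unaligned MPA packet. */
static void iwarp_parse_unaligned_data(const struct unaligned_opaque_data *d)
{
	u32 cid = le32_to_cpu(d->cid);
	u16 mpa_off = le16_to_cpu(d->first_mpa_offset);
	bool closed = GET_FIELD(d->flags,
				UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED);

	pr_debug("cid=%u first_mpa_offset=%u closed=%d\n",
		 cid, mpa_off, closed);
}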
+
+struct mstorm_iwarp_conn_ag_ctx {
+ u8 reserved;
+ u8 state;
+ u8 flags0;
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 rcq_cons;
+ __le16 rcq_cons_th;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ustorm_iwarp_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 cq_cons;
+ __le32 cq_se_prod;
+ __le32 cq_prod;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct ystorm_iwarp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
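/* For reference, a minimal sketch of how these _MASK/_SHIFT pairs are
 * consumed. qed's common headers provide GET_FIELD/SET_FIELD helpers
 * along these lines (the exact definitions live in common_hsi.h, so
 * treat this as illustrative rather than a quote):
 *
 *	#define GET_FIELD(value, name) \
 *		(((value) >> name ## _SHIFT) & name ## _MASK)
 *
 *	#define SET_FIELD(value, name, flag)				\
 *		do {							\
 *			(value) &= ~(name ## _MASK << name ## _SHIFT);	\
 *			(value) |= ((flag) << name ## _SHIFT);		\
 *		} while (0)
 *
 * e.g. GET_FIELD(flags0, YSTORM_IWARP_CONN_AG_CTX_CF1) pulls the two
 * CF1 bits (mask 0x3, shift 4) out of flags0 above.
 */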
struct ystorm_fcoe_conn_st_ctx {
u8 func_mode;
u8 cos;
@@ -9222,7 +10411,7 @@ struct xstorm_iscsi_conn_ag_ctx {
u8 byte13;
u8 byte14;
u8 byte15;
- u8 byte16;
+ u8 ereserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
@@ -10466,6 +11655,8 @@ struct public_drv_mb {
#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
@@ -10591,6 +11782,12 @@ struct nvm_cfg1_glob {
u32 led_global_settings;
u32 generic_cont1;
u32 mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
u32 mbi_date;
u32 misc_sig;
u32 device_capabilities;
@@ -10758,6 +11955,8 @@ struct static_init {
u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
};
+#define NVM_MAGIC_VALUE 0x669955aa
+
enum nvm_image_type {
NVM_TYPE_TIM1 = 0x01,
NVM_TYPE_TIM2 = 0x02,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 0a8fde629991..b069ad088269 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -40,31 +40,17 @@
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
-enum cminterface {
- MCM_SEC,
- MCM_PRI,
- UCM_SEC,
- UCM_PRI,
- TCM_SEC,
- TCM_PRI,
- YCM_SEC,
- YCM_PRI,
- XCM_SEC,
- XCM_PRI,
- NUM_OF_CM_INTERFACES
-};
-
-/* general constants */
+/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
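/* Worked example of the sizing macros above, assuming QM_PQ_ELEMENT_SIZE
 * is 4 bytes: for pq_size = 4095,
 *	QM_PQ_MEM_4KB(4095)   = DIV_ROUND_UP(4096 * 4, 0x1000) = 4 pages,
 *	QM_PQ_SIZE_256B(4095) = DIV_ROUND_UP(4095, 0x100) - 1  = 15,
 * while both macros map pq_size == 0 to 0.
 */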
-/* feature enable */
+/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
-/* other PQ constants */
+/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND 62500000
@@ -106,20 +92,21 @@ enum cminterface {
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 0x2
-#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
_STRUCT_SIZE
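/* The ## token pasting above turns a command name into its companion
 * defines: QM_CMD_STRUCT_SIZE(QM_STOP_CMD) expands to
 * QM_STOP_CMD_STRUCT_SIZE (2, per the constants above), so one macro
 * family serves every command without a lookup table.
 */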
@@ -146,16 +133,17 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- /* enable RLs for all VOQs */
+ /* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(1 << MAX_NUM_VOQS) - 1);
- /* write RL period */
+ /* Write RL period */
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
@@ -167,7 +155,8 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
@@ -180,14 +169,15 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
if (vport_rl_en) {
- /* write RL period (use timer 0 only) */
+ /* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
@@ -200,7 +190,8 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
@@ -208,7 +199,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
}
/* Prepare runtime init values to allocate PBF command queue lines for
- * the specified VOQ
+ * the specified VOQ.
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
u8 voq, u16 cmdq_lines)
@@ -232,7 +223,7 @@ static void qed_cmdq_lines_rt_init(
{
u8 tc, voq, port_id, num_tcs_in_port;
- /* clear PBF lines for all VOQs */
+ /* Clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
@@ -285,7 +276,7 @@ static void qed_btb_blocks_rt_init(
if (!port_params[port_id].active)
continue;
- /* subtract headroom blocks */
+ /* Subtract headroom blocks */
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
@@ -305,7 +296,7 @@ static void qed_btb_blocks_rt_init(
phys_blocks = (usable_blocks - pure_lb_blocks) /
num_tcs_in_port;
- /* init physical TCs */
+ /* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >>
tc) & 0x1) != 1)
@@ -317,7 +308,7 @@ static void qed_btb_blocks_rt_init(
phys_blocks);
}
- /* init pure LB TC */
+ /* Init pure LB TC */
temp = LB_VOQ(port_id);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
pure_lb_blocks);
@@ -338,24 +329,24 @@ static void qed_tx_pq_map_rt_init(
QM_PF_QUEUE_GROUP_SIZE;
u16 i, pq_id, pq_group;
- /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
- /* set mapping from PQ group to PF */
+ /* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(p_params->pf_id));
- /* set PQ sizes */
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_pf_cids));
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
QM_PQ_SIZE_256B(p_params->num_vf_cids));
- /* go over all Tx PQs */
+ /* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
p_params->max_phys_tcs_per_port);
@@ -366,17 +357,18 @@ static void qed_tx_pq_map_rt_init(
(p_params->pq_params[i].vport_id <
MAX_QM_GLOBAL_RLS);
- /* update first Tx PQ of VPORT/TC */
+ /* Update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
- /* create new VP PQ */
+ /* Create new VP PQ */
pq_ids[p_params->pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
- /* map VP PQ to VOQ and PF */
+
+ /* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET +
first_tx_pq_id,
@@ -388,7 +380,7 @@ static void qed_tx_pq_map_rt_init(
if (p_params->pq_params[i].rl_valid && !rl_valid)
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration");
- /* fill PQ map entry */
+ /* Fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg,
@@ -400,18 +392,16 @@ static void qed_tx_pq_map_rt_init(
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
p_params->pq_params[i].wrr_group);
- /* write PQ map entry to CAM */
+ /* Write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
*((u32 *)&tx_pq_map));
- /* set base address */
+ /* Set base address */
STORE_RT_REG(p_hwfn,
QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
- /* check if VF PQ */
+
+ /* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
- /* if PQ is associated with a VF, add indication
- * to PQ VF mask
- */
tx_pq_vf_mask[pq_id /
QM_PF_QUEUE_GROUP_SIZE] |=
BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
@@ -421,16 +411,12 @@ static void qed_tx_pq_map_rt_init(
}
}
- /* store Tx PQ VF mask to size select register */
- for (i = 0; i < num_tx_pq_vf_masks; i++) {
- if (tx_pq_vf_mask[i]) {
- u32 addr;
-
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
- STORE_RT_REG(p_hwfn, addr,
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn,
+ QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
tx_pq_vf_mask[i]);
- }
- }
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -440,23 +426,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
- u16 i, pq_id;
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, pq_id, pq_group;
/* a single other PQ group is used in each PF,
* where PQ group i is used in PF i.
*/
- u16 pq_group = pf_id;
- u32 pq_size = num_pf_cids + num_tids;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
- u32 mem_addr_4kb = base_mem_addr_4kb;
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
- /* map PQ group to PF */
+ /* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* set base address */
+
+ /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
STORE_RT_REG(p_hwfn,
@@ -485,7 +473,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -514,7 +502,7 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
@@ -535,7 +523,7 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u32 inc_val;
u8 tc, i;
- /* go over all PF VPORTs */
+ /* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
if (!vport_params[i].vport_wfq)
@@ -544,7 +532,7 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT WFQ weight configuration");
+ "Invalid VPORT WFQ weight configuration\n");
return -1;
}
@@ -578,17 +566,17 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- /* go over all PF VPORTs */
+ /* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
@@ -617,7 +605,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
- /* check if timeout while waiting for SDM command ready */
+ /* Check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Timeout when waiting for QM SDM command ready signal\n");
@@ -701,16 +689,16 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
QM_OTHER_PQS_PER_PF;
u8 tc, i;
- /* clear first Tx PQ ID array for each VPORT */
+ /* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
- /* map Other PQs (if any) */
+ /* Map Other PQs (if any) */
qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
p_params->num_pf_cids, p_params->num_tids, 0);
- /* map Tx PQs */
+ /* Map Tx PQs */
qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
if (p_params->pf_wfq)
@@ -736,7 +724,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -750,7 +738,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
u32 inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
return -1;
}
@@ -766,17 +754,18 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ u16 vport_pq_id;
+ u32 inc_val;
u8 tc;
+ inc_val = QM_WFQ_INC_VAL(vport_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
return -1;
}
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = first_tx_pq_id[tc];
-
+ vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
qed_wr(p_hwfn, p_ptt,
QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
@@ -793,12 +782,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
if (vport_id >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
return -1;
}
@@ -818,15 +807,15 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
- /* set command's PQ type */
+ /* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
- /* set PQ bit in mask (stop command only) */
+ /* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
- /* if last PQ or end of PQ mask, write command */
+ /* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
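/* Illustration of the batching visible above: PQs are handled in
 * groups of QM_STOP_PQ_MASK_WIDTH (32). For start_pq = 16 and
 * num_pqs = 40 (last_pq = 55), the loop writes one command when pq_id
 * reaches 31 (mask covering PQs 16-31) and a final one at pq_id = 55
 * (mask covering PQs 32-55), with pq_mask rebuilt for each batch.
 */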
@@ -962,8 +951,10 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
+#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
+#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
-#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
@@ -971,40 +962,26 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 pf_id)
{
- union gft_cam_line_union camline;
- struct gft_ram_line ramline;
- u32 *p_ramline, i;
-
- p_ramline = (u32 *)&ramline;
+ u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
+ pf_id * RAM_LINE_SIZE;
	/* Stop using GFT logic */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
- memset(&camline, 0, sizeof(union gft_cam_line_union));
- qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- camline.cam_line_mapped.camline);
- memset(&ramline, 0, sizeof(ramline));
-
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
- u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
-
- hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
-
- qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
- }
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
+ qed_wr(p_hwfn, p_ptt, hw_addr, 0);
+ qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
}
void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 pf_id, bool tcp, bool udp,
bool ipv4, bool ipv6)
{
- u32 rfs_cm_hdr_event_id, *p_ramline;
union gft_cam_line_union camline;
struct gft_ram_line ramline;
- int i;
+ u32 rfs_cm_hdr_event_id;
rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- p_ramline = (u32 *)&ramline;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1024,18 +1001,20 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
camline.cam_line_mapped.camline = 0;
- /* cam line is now valid!! */
+ /* Cam line is now valid!! */
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_VALID, 1);
/* filters are per PF!! */
SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
if (!(tcp && udp)) {
SET_FIELD(camline.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(camline.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
@@ -1059,34 +1038,38 @@ void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
GFT_PROFILE_IPV6);
}
- /* write characteristics to cam */
+ /* Write characteristics to cam */
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
camline.cam_line_mapped.camline);
camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
PRS_REG_GFT_CAM +
CAM_LINE_SIZE * pf_id);
- /* write line to RAM - compare to filter 4 tuple */
- ramline.low32bits = 0;
- ramline.high32bits = 0;
- SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);
-
- /* each iteration write to reg */
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
- i * REG_SIZE, *(p_ramline + i));
-
- /* set default profile so that no filter match will happen */
- ramline.low32bits = 0xffff;
- ramline.high32bits = 0xffff;
-
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- qed_wr(p_hwfn, p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
- *(p_ramline + i));
+ /* Write line to RAM - compare to filter 4 tuple */
+ ramline.lo = 0;
+ ramline.hi = 0;
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* Each iteration write to reg */
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ ramline.lo);
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
+ ramline.hi);
+
+ /* Set default profile so that no filter match will happen */
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
+ ramline.lo);
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
+ ramline.hi);
}
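/* The rework above drops the u32-pointer walk in favour of two
 * explicit 32-bit writes per 64-bit GFT RAM line: ramline.lo at the
 * line's base and ramline.hi at base + 4. For pf_id = 2, say, lo lands
 * at PRS_REG_GFT_PROFILE_MASK_RAM + 2 * RAM_LINE_SIZE and hi four
 * bytes later, consistent with RAM_LINE_SIZE being sizeof(u64).
 */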
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 4a2e7be5bf72..e3f368882f46 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -158,6 +158,7 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
GFP_KERNEL);
if (!rt_data->init_val) {
kfree(rt_data->b_valid);
+ rt_data->b_valid = NULL;
return -ENOMEM;
}
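/* Clearing the pointer after kfree() makes later teardown idempotent:
 * qed_init_free() can then free both buffers unconditionally without
 * risking a double free, since kfree(NULL) is a no-op. The idiom, in
 * miniature:
 *
 *	kfree(p);
 *	p = NULL;
 */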
@@ -167,7 +168,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
void qed_init_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->rt_data.init_val);
+ p_hwfn->rt_data.init_val = NULL;
kfree(p_hwfn->rt_data.b_valid);
+ p_hwfn->rt_data.b_valid = NULL;
}
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
@@ -525,6 +528,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
}
kfree(p_hwfn->unzip_buf);
+ p_hwfn->unzip_buf = NULL;
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 40f057edeafc..661412c275f7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2328,6 +2328,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
SB_ATTN_ALIGNED_SIZE(p_hwfn),
p_sb->sb_attn, p_sb->sb_phys);
kfree(p_sb);
+ p_hwfn->p_sb_attn = NULL;
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
@@ -2679,6 +2680,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
p_sb->sb_info.sb_virt,
p_sb->sb_info.sb_phys);
kfree(p_sb);
+ p_hwfn->p_sp_sb = NULL;
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -3157,6 +3159,7 @@ static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->sp_dpc);
+ p_hwfn->sp_dpc = NULL;
}
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 339c91dfa658..43a20a6fd1b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -44,7 +44,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -186,7 +185,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
DP_ERR(p_hwfn,
"Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
p_params->num_queues,
- p_hwfn->hw_info.resc_num[QED_ISCSI_CQ]);
+ p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
return -EINVAL;
}
@@ -375,7 +374,6 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh);
p_tcp->srtt = cpu_to_le16(p_conn->srtt);
p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var);
- p_tcp->ts_time = cpu_to_le32(p_conn->ts_time);
p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent);
p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age);
p_tcp->total_rt = cpu_to_le32(p_conn->total_rt);
@@ -400,8 +398,6 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp->mss = cpu_to_le16(p_conn->mss);
p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale;
p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
- dval = p_conn->ts_ticks_per_second;
- p_tcp->ts_ticks_per_second = cpu_to_le32(dval);
wval = p_conn->da_timeout_value;
p_tcp->da_timeout_value = cpu_to_le16(wval);
p_tcp->ack_frequency = p_conn->ack_frequency;
@@ -822,29 +818,32 @@ void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
kfree(p_conn);
}
-struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL);
if (!p_iscsi_info)
- return NULL;
+ return -ENOMEM;
INIT_LIST_HEAD(&p_iscsi_info->free_list);
- return p_iscsi_info;
+
+ p_hwfn->p_iscsi_info = p_iscsi_info;
+ return 0;
}
-void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info)
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn)
{
- spin_lock_init(&p_iscsi_info->lock);
+ spin_lock_init(&p_hwfn->p_iscsi_info->lock);
}
-void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info)
+void qed_iscsi_free(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_conn *p_conn = NULL;
+ if (!p_hwfn->p_iscsi_info)
+ return;
+
while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
struct qed_iscsi_conn, list_entry);
@@ -854,7 +853,8 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn,
}
}
- kfree(p_iscsi_info);
+ kfree(p_hwfn->p_iscsi_info);
+ p_hwfn->p_iscsi_info = NULL;
}
static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index ae98f772cbc0..225c75b02a06 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -57,13 +57,11 @@ extern const struct qed_ll2_ops qed_ll2_ops_pass;
#endif
#if IS_ENABLED(CONFIG_QED_ISCSI)
-struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
-void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info);
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
-void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info);
+void qed_iscsi_free(struct qed_hwfn *p_hwfn);
/**
* @brief - Fills provided statistics struct with statistics.
@@ -74,12 +72,15 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn,
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
-static inline struct qed_iscsi_info *qed_iscsi_alloc(
- struct qed_hwfn *p_hwfn) { return NULL; }
-static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
-static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
+static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
+
static inline void
qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats) {}
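/* With CONFIG_QED_ISCSI disabled, the stubs above keep callers
 * compiling unchanged: alloc reports -EINVAL and setup/free collapse
 * to nothing. A sketch of the same Kconfig-stub pattern for a
 * hypothetical feature FOO (names illustrative):
 *
 *	#if IS_ENABLED(CONFIG_FOO)
 *	int foo_alloc(struct qed_hwfn *p_hwfn);
 *	#else
 *	static inline int foo_alloc(struct qed_hwfn *p_hwfn)
 *	{
 *		return -EINVAL;
 *	}
 *	#endif
 */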
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 746fed4099c8..93dd781cf61d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -2301,14 +2300,25 @@ static int qed_tunn_configure(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
struct qed_tunnel_info *tun;
tun = &hwfn->cdev->tunnel;
+ if (IS_PF(cdev)) {
+ p_ptt = qed_ptt_acquire(hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+ } else {
+ p_ptt = NULL;
+ }
- rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
- if (rc)
+ if (rc) {
+ if (IS_PF(cdev))
+ qed_ptt_release(hwfn, p_ptt);
return rc;
+ }
if (IS_PF_SRIOV(hwfn)) {
u16 vxlan_port, geneve_port;
@@ -2325,6 +2335,8 @@ static int qed_tunn_configure(struct qed_dev *cdev,
qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
+ if (IS_PF(cdev))
+ qed_ptt_release(hwfn, p_ptt);
}
return 0;
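/* Sketch of the PTT discipline introduced above (PF only; VFs pass a
 * NULL ptt), folded into a single sequence for clarity:
 *
 *	p_ptt = IS_PF(cdev) ? qed_ptt_acquire(hwfn) : NULL;
 *	if (IS_PF(cdev) && !p_ptt)
 *		return -EAGAIN;
 *	rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
 *				       QED_SPQ_MODE_EBLOCK, NULL);
 *	if (IS_PF(cdev))
 *		qed_ptt_release(hwfn, p_ptt);
 *	if (rc)
 *		return rc;
 *
 * The key point is that every acquire is matched by a release on both
 * the error and success paths.
 */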
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 09c86411918c..f67ed6d39dfd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,7 +38,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
@@ -1921,7 +1920,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
mutex_unlock(&p_ll2_conn->mutex);
}
-struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_ll2_info *p_ll2_connections;
u8 i;
@@ -1931,28 +1930,31 @@ struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
sizeof(struct qed_ll2_info), GFP_KERNEL);
if (!p_ll2_connections) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
- return NULL;
+ return -ENOMEM;
}
for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
p_ll2_connections[i].my_id = i;
- return p_ll2_connections;
+ p_hwfn->p_ll2_info = p_ll2_connections;
+ return 0;
}
-void qed_ll2_setup(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections)
+void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
int i;
for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
- mutex_init(&p_ll2_connections[i].mutex);
+ mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}
-void qed_ll2_free(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections)
+void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
- kfree(p_ll2_connections);
+ if (!p_hwfn->p_ll2_info)
+ return;
+
+ kfree(p_hwfn->p_ll2_info);
+ p_hwfn->p_ll2_info = NULL;
}
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 31a409033c41..2c07d0ed971a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -306,27 +306,24 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
*
* @param p_hwfn
*
- * @return pointer to alocated qed_ll2_info or NULL
+ * @return int
*/
-struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ll2_setup - Inits LL2 connections set
*
* @param p_hwfn
- * @param p_ll2_connections
*
*/
-void qed_ll2_setup(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections);
+void qed_ll2_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_ll2_free - Releases LL2 connections set
*
* @param p_hwfn
- * @param p_ll2_connections
*
*/
-void qed_ll2_free(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2_connections);
+void qed_ll2_free(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 537d1236a4fe..c5bb80b9afc1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -34,7 +34,6 @@
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
@@ -282,6 +281,9 @@ int qed_fill_dev_info(struct qed_dev *cdev,
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);
+ qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mbi_version);
+
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size);
@@ -336,6 +338,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
cdev->protocol = params->protocol;
if (params->is_vf)
@@ -607,6 +610,18 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}
+static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
+{
+ /* Calling the disable function will make sure that any
+ * currently-running function is completed. The following call to the
+ * enable function makes this sequence a flush-like operation.
+ */
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(p_hwfn->sp_dpc);
+ tasklet_enable(p_hwfn->sp_dpc);
+ }
+}
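/* The disable/enable pair above acts as a flush because
 * tasklet_disable() does not return until a handler that is currently
 * executing has finished; tasklet_enable() then re-arms the tasklet.
 * Usage mirrors qed_slowpath_irq_sync() below:
 *
 *	synchronize_irq(cdev->pdev->irq);
 *	qed_slowpath_tasklet_flush(p_hwfn);
 *
 * so that neither the ISR nor its bottom half is mid-flight afterwards.
 */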
+
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
struct qed_dev *cdev = p_hwfn->cdev;
@@ -618,6 +633,8 @@ void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
synchronize_irq(cdev->int_params.msix_table[id].vector);
else
synchronize_irq(cdev->pdev->irq);
+
+ qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
@@ -1112,17 +1129,13 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
return 0;
}
-static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
- char ver_str[VER_SIZE])
+static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
int i;
memcpy(cdev->name, name, NAME_SIZE);
for_each_hwfn(cdev, i)
snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
-
- memcpy(cdev->ver_str, ver_str, VER_SIZE);
- cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
static u32 qed_sb_init(struct qed_dev *cdev,
@@ -1676,7 +1689,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
- .set_id = &qed_set_id,
+ .set_name = &qed_set_name,
.update_pf_params = &qed_update_pf_params,
.slowpath_start = &qed_slowpath_start,
.slowpath_stop = &qed_slowpath_stop,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 7266b36a2655..fc49c75e6c4b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -177,6 +177,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
}
kfree(p_hwfn->mcp_info);
+ p_hwfn->mcp_info = NULL;
return 0;
}
@@ -1522,6 +1523,36 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ /* Read the address of the nvm_cfg */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read the offset of nvm_cfg1 */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, mbi_version);
+ *p_mbi_ver = qed_rd(p_hwfn, p_ptt,
+ mbi_ver_addr) &
+ (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
+
+ return 0;
+}
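/* Usage sketch: the three masked bytes can be split back out with the
 * NVM_CFG1_GLOB_MBI_VERSION_*_MASK/_OFFSET pairs added earlier in this
 * patch (which byte is the "major" component is illustrative here):
 *
 *	u32 mbi_ver;
 *
 *	if (!qed_mcp_get_mbi_ver(p_hwfn, p_ptt, &mbi_ver))
 *		DP_INFO(p_hwfn, "MBI %d.%d.%d\n",
 *			(mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_2_MASK) >>
 *			NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET,
 *			(mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_1_MASK) >>
 *			NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET,
 *			(mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_0_MASK) >>
 *			NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET);
 */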
+
int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 2b09b8545236..40247593e772 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -256,6 +256,18 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
+ * @brief Get the MBI version value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver);
+
+/**
* @brief Get media type value of the port.
*
* @param cdev - qed dev pointer
@@ -482,7 +494,7 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->cdev->num_ports_in_engines * \
+ ((_p_hwfn)->cdev->num_ports_in_engine * \
qed_device_num_engines((_p_hwfn)->cdev)))
struct qed_mcp_info {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index db96670192c7..000636530111 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -99,7 +99,7 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
p_history->head_idx++;
}
-struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
u16 max_num_archipelagos = 0, cid_base;
struct qed_ooo_info *p_ooo_info;
@@ -109,7 +109,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown personality\n");
- return NULL;
+ return -EINVAL;
}
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
@@ -119,12 +119,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown amount of connections\n");
- return NULL;
+ return -EINVAL;
}
p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
if (!p_ooo_info)
- return NULL;
+ return -ENOMEM;
p_ooo_info->cid_base = cid_base;
p_ooo_info->max_num_archipelagos = max_num_archipelagos;
@@ -164,7 +164,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
- return p_ooo_info;
+ p_hwfn->p_ooo_info = p_ooo_info;
+ return 0;
no_history_mem:
kfree(p_ooo_info->p_archipelagos_mem);
@@ -172,7 +173,7 @@ no_archipelagos_mem:
kfree(p_ooo_info->p_isles_mem);
no_isles_mem:
kfree(p_ooo_info);
- return NULL;
+ return -ENOMEM;
}
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
@@ -249,19 +250,23 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
&p_ooo_info->free_buffers_list);
}
-void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+void qed_ooo_setup(struct qed_hwfn *p_hwfn)
{
- qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
- memset(p_ooo_info->ooo_history.p_cqes, 0,
- p_ooo_info->ooo_history.num_of_cqes *
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ memset(p_hwfn->p_ooo_info->ooo_history.p_cqes, 0,
+ p_hwfn->p_ooo_info->ooo_history.num_of_cqes *
sizeof(struct ooo_opaque));
- p_ooo_info->ooo_history.head_idx = 0;
+ p_hwfn->p_ooo_info->ooo_history.head_idx = 0;
}
-void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
+void qed_ooo_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_ooo_info *p_ooo_info = p_hwfn->p_ooo_info;
struct qed_ooo_buffer *p_buffer;
+ if (!p_ooo_info)
+ return;
+
qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
while (!list_empty(&p_ooo_info->free_buffers_list)) {
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
@@ -282,6 +287,7 @@ void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info)
kfree(p_ooo_info->p_archipelagos_mem);
kfree(p_ooo_info->ooo_history.p_cqes);
kfree(p_ooo_info);
+ p_hwfn->p_ooo_info = NULL;
}
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
index 791ad0f8b759..e8ed40b848f5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -88,7 +88,11 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
-struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn);
void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
@@ -97,10 +101,6 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info);
-void qed_ooo_setup(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
-
-void qed_ooo_free(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info);
-
void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer);
@@ -140,8 +140,14 @@ static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe) {}
-static inline struct qed_ooo_info *qed_ooo_alloc(
- struct qed_hwfn *p_hwfn) { return NULL; }
+static inline int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_ooo_free(struct qed_hwfn *p_hwfn) {}
static inline void
qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
@@ -152,12 +158,6 @@ static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{}
-static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn,
- struct qed_ooo_info *p_ooo_info) {}
-
-static inline void qed_ooo_free(struct qed_hwfn *p_hwfn,
- struct qed_ooo_info *p_ooo_info) {}
-
static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct qed_ooo_buffer *p_buffer) {}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 434a164a76ed..5a90d69dc2f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -80,7 +80,7 @@ static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* MFW doesn't support resource locking, first PF on the port
* has lock ownership.
*/
- if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
return 0;
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
@@ -108,7 +108,7 @@ static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
if (rc == -EINVAL) {
/* MFW doesn't support locking, first PF has lock ownership */
- if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
rc = 0;
} else {
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 1ae73b2d6d1e..f14772b9cda3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -592,15 +592,15 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-#define PGLCS_REG_DBG_SELECT \
+#define PGLCS_REG_DBG_SELECT_K2 \
0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
0x001d18UL
-#define PGLCS_REG_DBG_SHIFT \
+#define PGLCS_REG_DBG_SHIFT_K2 \
0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID \
+#define PGLCS_REG_DBG_FORCE_VALID_K2 \
0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
0x001d24UL
#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
0x008070UL
@@ -612,7 +612,7 @@
0x009050UL
#define MISCS_REG_RESET_PL_HV \
0x009060UL
-#define MISCS_REG_RESET_PL_HV_2 \
+#define MISCS_REG_RESET_PL_HV_2_K2 \
0x009150UL
#define DMAE_REG_DBG_SELECT \
0x00c510UL
@@ -644,15 +644,15 @@
0x0500b0UL
#define GRC_REG_DBG_FORCE_FRAME \
0x0500b4UL
-#define UMAC_REG_DBG_SELECT \
+#define UMAC_REG_DBG_SELECT_K2 \
0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
0x051098UL
-#define UMAC_REG_DBG_SHIFT \
+#define UMAC_REG_DBG_SHIFT_K2 \
0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID \
+#define UMAC_REG_DBG_FORCE_VALID_K2 \
0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME \
+#define UMAC_REG_DBG_FORCE_FRAME_K2 \
0x0510a4UL
#define MCP2_REG_DBG_SELECT \
0x052400UL
@@ -924,15 +924,15 @@
0x4c160cUL
#define XYLD_REG_DBG_FORCE_FRAME \
0x4c1610UL
-#define YULD_REG_DBG_SELECT \
+#define YULD_REG_DBG_SELECT_BB_K2 \
0x4c9600UL
-#define YULD_REG_DBG_DWORD_ENABLE \
+#define YULD_REG_DBG_DWORD_ENABLE_BB_K2 \
0x4c9604UL
-#define YULD_REG_DBG_SHIFT \
+#define YULD_REG_DBG_SHIFT_BB_K2 \
0x4c9608UL
-#define YULD_REG_DBG_FORCE_VALID \
+#define YULD_REG_DBG_FORCE_VALID_BB_K2 \
0x4c960cUL
-#define YULD_REG_DBG_FORCE_FRAME \
+#define YULD_REG_DBG_FORCE_FRAME_BB_K2 \
0x4c9610UL
#define TMLD_REG_DBG_SELECT \
0x4d1600UL
@@ -994,35 +994,35 @@
0x580710UL
#define CDU_REG_DBG_FORCE_FRAME \
0x580714UL
-#define WOL_REG_DBG_SELECT \
+#define WOL_REG_DBG_SELECT_K2 \
0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE \
+#define WOL_REG_DBG_DWORD_ENABLE_K2 \
0x600144UL
-#define WOL_REG_DBG_SHIFT \
+#define WOL_REG_DBG_SHIFT_K2 \
0x600148UL
-#define WOL_REG_DBG_FORCE_VALID \
+#define WOL_REG_DBG_FORCE_VALID_K2 \
0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME \
+#define WOL_REG_DBG_FORCE_FRAME_K2 \
0x600150UL
-#define BMBN_REG_DBG_SELECT \
+#define BMBN_REG_DBG_SELECT_K2 \
0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
0x610144UL
-#define BMBN_REG_DBG_SHIFT \
+#define BMBN_REG_DBG_SHIFT_K2 \
0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID \
+#define BMBN_REG_DBG_FORCE_VALID_K2 \
0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME \
+#define BMBN_REG_DBG_FORCE_FRAME_K2 \
0x610150UL
-#define NWM_REG_DBG_SELECT \
+#define NWM_REG_DBG_SELECT_K2 \
0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE \
+#define NWM_REG_DBG_DWORD_ENABLE_K2 \
0x8000f0UL
-#define NWM_REG_DBG_SHIFT \
+#define NWM_REG_DBG_SHIFT_K2 \
0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID \
+#define NWM_REG_DBG_FORCE_VALID_K2 \
0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME \
+#define NWM_REG_DBG_FORCE_FRAME_K2 \
0x8000fcUL
#define PBF_REG_DBG_SELECT \
0xd80060UL
@@ -1244,35 +1244,35 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
-#define NWS_REG_DBG_SELECT \
+#define NWS_REG_DBG_SELECT_K2 \
0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE \
+#define NWS_REG_DBG_DWORD_ENABLE_K2 \
0x70012cUL
-#define NWS_REG_DBG_SHIFT \
+#define NWS_REG_DBG_SHIFT_K2 \
0x700130UL
-#define NWS_REG_DBG_FORCE_VALID \
+#define NWS_REG_DBG_FORCE_VALID_K2 \
0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME \
+#define NWS_REG_DBG_FORCE_FRAME_K2 \
0x700138UL
-#define MS_REG_DBG_SELECT \
+#define MS_REG_DBG_SELECT_K2 \
0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE \
+#define MS_REG_DBG_DWORD_ENABLE_K2 \
0x6a022cUL
-#define MS_REG_DBG_SHIFT \
+#define MS_REG_DBG_SHIFT_K2 \
0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID \
+#define MS_REG_DBG_FORCE_VALID_K2 \
0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME \
+#define MS_REG_DBG_FORCE_FRAME_K2 \
0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT \
+#define PCIE_REG_DBG_COMMON_SELECT_K2 \
0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
0x0543a8UL
#define MISC_REG_RESET_PL_UA \
0x008050UL
@@ -1328,85 +1328,85 @@
0x128170cUL
#define UCM_REG_SM_TASK_CTX \
0x1281710UL
-#define XSEM_REG_SLOW_DBG_EMPTY \
+#define XSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1401140UL
#define XSEM_REG_SYNC_DBG_EMPTY \
0x1401160UL
-#define XSEM_REG_SLOW_DBG_ACTIVE \
+#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1401400UL
-#define XSEM_REG_SLOW_DBG_MODE \
+#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1401404UL
-#define XSEM_REG_DBG_FRAME_MODE \
+#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1401408UL
-#define XSEM_REG_DBG_MODE1_CFG \
+#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
0x1440000UL
#define YSEM_REG_SYNC_DBG_EMPTY \
0x1501160UL
-#define YSEM_REG_SLOW_DBG_ACTIVE \
+#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1501400UL
-#define YSEM_REG_SLOW_DBG_MODE \
+#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1501404UL
-#define YSEM_REG_DBG_FRAME_MODE \
+#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1501408UL
-#define YSEM_REG_DBG_MODE1_CFG \
+#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
0x1540000UL
-#define PSEM_REG_SLOW_DBG_EMPTY \
+#define PSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1601140UL
#define PSEM_REG_SYNC_DBG_EMPTY \
0x1601160UL
-#define PSEM_REG_SLOW_DBG_ACTIVE \
+#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1601400UL
-#define PSEM_REG_SLOW_DBG_MODE \
+#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1601404UL
-#define PSEM_REG_DBG_FRAME_MODE \
+#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1601408UL
-#define PSEM_REG_DBG_MODE1_CFG \
+#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
0x1640000UL
-#define TSEM_REG_SLOW_DBG_EMPTY \
+#define TSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1701140UL
#define TSEM_REG_SYNC_DBG_EMPTY \
0x1701160UL
-#define TSEM_REG_SLOW_DBG_ACTIVE \
+#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1701400UL
-#define TSEM_REG_SLOW_DBG_MODE \
+#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1701404UL
-#define TSEM_REG_DBG_FRAME_MODE \
+#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1701408UL
-#define TSEM_REG_DBG_MODE1_CFG \
+#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
0x1740000UL
-#define MSEM_REG_SLOW_DBG_EMPTY \
+#define MSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1801140UL
#define MSEM_REG_SYNC_DBG_EMPTY \
0x1801160UL
-#define MSEM_REG_SLOW_DBG_ACTIVE \
+#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1801400UL
-#define MSEM_REG_SLOW_DBG_MODE \
+#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1801404UL
-#define MSEM_REG_DBG_FRAME_MODE \
+#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1801408UL
-#define MSEM_REG_DBG_MODE1_CFG \
+#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
-#define USEM_REG_SLOW_DBG_EMPTY \
+#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1901140UL
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
-#define USEM_REG_SLOW_DBG_ACTIVE \
+#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
0x1901400UL
-#define USEM_REG_SLOW_DBG_MODE \
+#define USEM_REG_SLOW_DBG_MODE_BB_K2 \
0x1901404UL
-#define USEM_REG_DBG_FRAME_MODE \
+#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1901408UL
-#define USEM_REG_DBG_MODE1_CFG \
+#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
@@ -1430,7 +1430,7 @@
0x340800UL
#define BRB_REG_BIG_RAM_DATA \
0x341500UL
-#define SEM_FAST_REG_STALL_0 \
+#define SEM_FAST_REG_STALL_0_BB_K2 \
0x000488UL
#define SEM_FAST_REG_STALLED \
0x000494UL
@@ -1480,37 +1480,37 @@
4
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
-#define NWS_REG_NWS_CMU \
+#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
0x0006c4UL
-#define MS_REG_MS_CMU \
+#define MS_REG_MS_CMU_K2 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \
- 0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+ 0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_REG_PHY0 \
+#define PHY_PCIE_REG_PHY0_K2 \
0x620000UL
-#define PHY_PCIE_REG_PHY1 \
+#define PHY_PCIE_REG_PHY1_K2 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 56289d7cd306..eb1a5cfc49c0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2432,10 +2432,6 @@ qed_rdma_register_tid(void *rdma_cxt,
params->page_size_log - 12);
SET_FIELD(p_ramrod->flags,
- RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
- p_hwfn->p_rdma_info->last_tid);
-
- SET_FIELD(p_ramrod->flags,
RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
params->remote_read);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 3357bbefa445..ef77de4de5f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -270,28 +270,23 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param num_elem number of elements in the eq
*
- * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ * @return int
*/
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
- u16 num_elem);
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief qed_eq_setup - Reset the SPQ to its start state.
+ * @brief qed_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_eq_setup(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq);
+void qed_eq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_deallocate - deallocates the given EQ struct.
+ * @brief qed_eq_free - deallocates the given EQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_eq_free(struct qed_hwfn *p_hwfn,
- struct qed_eq *p_eq);
+void qed_eq_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_eq_prod_update - update the FW with default EQ producer
@@ -342,28 +337,23 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
*
* @param p_hwfn
*
- * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ * @return int
*/
-struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+int qed_consq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_setup - Reset the ConsQ to its start
- * state.
+ * @brief qed_consq_setup - Reset the ConsQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_consq_setup(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq);
+void qed_consq_setup(struct qed_hwfn *p_hwfn);
/**
* @brief qed_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void qed_consq_free(struct qed_hwfn *p_hwfn,
- struct qed_consq *p_consq);
+void qed_consq_free(struct qed_hwfn *p_hwfn);
/**
* @file
@@ -401,6 +391,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn
* @param mode
* @param allow_npar_tx_switch
@@ -409,6 +400,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch);
@@ -442,6 +434,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
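/*
 * Editor's note: the qed_sp.h hunk above moves qed_eq_alloc()/qed_consq_alloc()
 * from "return a pointer, NULL on error" to "return an errno, park the object
 * on the hwfn".  A minimal standalone sketch of that convention (hypothetical
 * model_* names, not the driver's real types):
 */
#include <stdlib.h>
#include <errno.h>

struct model_eq { int dummy; };
struct model_hwfn { struct model_eq *p_eq; };

static int model_eq_alloc(struct model_hwfn *hwfn)
{
	struct model_eq *eq = calloc(1, sizeof(*eq));

	if (!eq)
		return -ENOMEM;
	hwfn->p_eq = eq;	/* published on the owner, as in the patch */
	return 0;
}

static void model_eq_free(struct model_hwfn *hwfn)
{
	if (!hwfn->p_eq)
		return;
	free(hwfn->p_eq);
	hwfn->p_eq = NULL;	/* NULLed so a second free is a safe no-op */
}

int main(void)
{
	struct model_hwfn hwfn = { 0 };

	if (model_eq_alloc(&hwfn))
		return 1;
	model_eq_free(&hwfn);
	model_eq_free(&hwfn);	/* idempotent, like the reworked helpers */
	return 0;
}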
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index bc3694e91b85..ab09975343cb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -185,22 +185,20 @@ static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
}
static void
-__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct qed_tunn_update_type *tun_type)
{
*p_tunn_cls = tun_type->tun_cls;
-
- if (tun_type->b_mode_enabled)
- *p_enable_tx_clas = 1;
}
static void
-qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct qed_tunn_update_type *tun_type,
- u8 *p_update_port, __le16 *p_port,
+ u8 *p_update_port,
+ __le16 *p_port,
struct qed_tunn_update_udp_port *p_udp_port)
{
- __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type);
+ __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
if (p_udp_port->b_update_port) {
*p_update_port = 1;
*p_port = cpu_to_le16(p_udp_port->port);
@@ -219,33 +217,27 @@ qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
qed_set_tunn_ports(p_tun, p_src);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
- p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
@@ -261,17 +253,18 @@ static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
}
static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn)
{
if (p_tunn->vxlan_port.b_update_port)
- qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ qed_set_vxlan_dest_port(p_hwfn, p_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
- qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ qed_set_geneve_dest_port(p_hwfn, p_ptt,
p_tunn->geneve_port.port);
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+ qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
static void
@@ -289,33 +282,29 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
qed_set_tunn_ports(p_tun, p_src);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
@@ -412,7 +401,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (p_tunn)
- qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
+ qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+ &p_hwfn->cdev->tunnel);
return rc;
}
@@ -443,6 +433,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
@@ -477,7 +468,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
+ qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);
return rc;
}
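/*
 * Editor's note: the qed_sp_commands.c hunks above stop reaching for
 * p_hwfn->p_main_ptt and instead take the PTT window from the caller, so
 * e.g. the VF mailbox path can use the window it already holds.  A tiny
 * standalone sketch of the "pass the handle explicitly" refactor
 * (model_* names are invented for illustration):
 */
#include <stdio.h>

struct model_ptt { int window; };
struct model_hwfn { struct model_ptt *p_main_ptt; };

/* after the patch: the caller decides which PTT window is used */
static void model_set_tunn_mode(struct model_hwfn *hwfn, struct model_ptt *ptt)
{
	(void)hwfn;
	printf("programming tunnel registers via PTT window %d\n", ptt->window);
}

int main(void)
{
	struct model_ptt main_ptt = { .window = 0 }, vf_ptt = { .window = 3 };
	struct model_hwfn hwfn = { .p_main_ptt = &main_ptt };

	/* a VF flow can now pass its own acquired window */
	model_set_tunn_mode(&hwfn, &vf_ptt);
	return 0;
}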
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index f6423a139ca0..dede73f41e61 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -403,14 +403,14 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc;
}
-struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
struct qed_eq *p_eq;
/* Allocate EQ struct */
p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
if (!p_eq)
- return NULL;
+ return -ENOMEM;
/* Allocate and initialize EQ chain */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -426,24 +426,28 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
qed_int_register_cb(p_hwfn, qed_eq_completion,
p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
- return p_eq;
+ p_hwfn->p_eq = p_eq;
+ return 0;
eq_allocate_fail:
- qed_eq_free(p_hwfn, p_eq);
- return NULL;
+ kfree(p_eq);
+ return -ENOMEM;
}
-void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
+void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
- qed_chain_reset(&p_eq->chain);
+ qed_chain_reset(&p_hwfn->p_eq->chain);
}
-void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
+void qed_eq_free(struct qed_hwfn *p_hwfn)
{
- if (!p_eq)
+ if (!p_hwfn->p_eq)
return;
- qed_chain_free(p_hwfn->cdev, &p_eq->chain);
- kfree(p_eq);
+
+ qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
+
+ kfree(p_hwfn->p_eq);
+ p_hwfn->p_eq = NULL;
}
/***************************************************************************
@@ -583,8 +587,8 @@ void qed_spq_free(struct qed_hwfn *p_hwfn)
}
qed_chain_free(p_hwfn->cdev, &p_spq->chain);
- ;
kfree(p_spq);
+ p_hwfn->p_spq = NULL;
}
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
@@ -934,14 +938,14 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
return rc;
}
-struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_consq *p_consq;
/* Allocate ConsQ struct */
p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
if (!p_consq)
- return NULL;
+ return -ENOMEM;
/* Allocate and initialize ConsQ chain */
if (qed_chain_alloc(p_hwfn->cdev,
@@ -952,22 +956,26 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
0x80, &p_consq->chain))
goto consq_allocate_fail;
- return p_consq;
+ p_hwfn->p_consq = p_consq;
+ return 0;
consq_allocate_fail:
- qed_consq_free(p_hwfn, p_consq);
- return NULL;
+ kfree(p_consq);
+ return -ENOMEM;
}
-void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
+void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
- qed_chain_reset(&p_consq->chain);
+ qed_chain_reset(&p_hwfn->p_consq->chain);
}
-void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
+void qed_consq_free(struct qed_hwfn *p_hwfn)
{
- if (!p_consq)
+ if (!p_hwfn->p_consq)
return;
- qed_chain_free(p_hwfn->cdev, &p_consq->chain);
- kfree(p_consq);
+
+ qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
+
+ kfree(p_hwfn->p_consq);
+ p_hwfn->p_consq = NULL;
}
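/*
 * Editor's note: in the qed_spq.c hunks above, the chain-allocation failure
 * path switches from qed_eq_free()/qed_consq_free() to a bare kfree().  The
 * free helpers now read the object from the hwfn, and the pointer is only
 * published there on full success.  Standalone sketch of that
 * publish-on-success rule (hypothetical names):
 */
#include <stdlib.h>
#include <errno.h>

struct model_owner { void *obj; };

static int model_obj_alloc(struct model_owner *o, int init_fails)
{
	void *obj = malloc(16);

	if (!obj)
		return -ENOMEM;
	if (init_fails) {
		free(obj);	/* bare free: o->obj was never published */
		return -ENOMEM;
	}
	o->obj = obj;		/* publish only after full initialization */
	return 0;
}

int main(void)
{
	struct model_owner o = { 0 };

	model_obj_alloc(&o, 1);		/* failure leaves o.obj == NULL */
	return o.obj != NULL;
}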
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f5ed54d611ec..71e392fe1d97 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2209,7 +2209,7 @@ static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
if (b_update_required) {
u16 geneve_port;
- rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc)
status = PFVF_STATUS_FAILURE;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 9b4f08b6f9b9..694c09b8997e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -197,7 +197,6 @@ struct qede_dev {
#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
struct qed_int_info int_info;
- unsigned char primary_mac[ETH_ALEN];
/* Smaller private variant of the RTNL lock */
struct mutex qede_lock;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
index a9e7379313db..6e7747b9b95e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -313,7 +313,6 @@ static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
.ieee_setets = qede_dcbnl_ieee_setets,
.ieee_getapp = qede_dcbnl_ieee_getapp,
.ieee_setapp = qede_dcbnl_ieee_setapp,
- .getdcbx = qede_dcbnl_getdcbx,
.ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
.ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
.getstate = qede_dcbnl_getstate,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 172b292241a5..6a03d3e66cff 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -506,6 +506,14 @@ static int qede_set_link_ksettings(struct net_device *dev,
params.autoneg = false;
params.forced_speed = base->speed;
switch (base->speed) {
+ case SPEED_1000:
+ if (!(current_link.supported_caps &
+ QED_LM_1000baseT_Full_BIT)) {
+ DP_INFO(edev, "1G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_1000baseT_Full_BIT;
+ break;
case SPEED_10000:
if (!(current_link.supported_caps &
QED_LM_10000baseKR_Full_BIT)) {
@@ -1282,7 +1290,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
struct qede_tx_queue *txq = NULL;
struct eth_tx_1st_bd *first_bd;
dma_addr_t mapping;
- int i, idx, val;
+ int i, idx;
+ u16 val;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -1297,14 +1306,15 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
}
/* Fill the entry in the SW ring and the BDs in the FW ring */
- idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_prod;
txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = qed_chain_produce(&txq->tx_pbl);
memset(first_bd, 0, sizeof(*first_bd));
val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
first_bd->data.bd_flags.bitfields = val;
val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
- first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+ val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ first_bd->data.bitfields |= cpu_to_le16(val);
/* Map skb linear data for DMA and set in the first BD */
mapping = dma_map_single(&edev->pdev->dev, skb->data,
@@ -1317,10 +1327,10 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
/* update the first BD with the actual num BDs */
first_bd->data.nbds = 1;
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* 'next page' entries are counted in the producer value */
- val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- txq->tx_db.data.bd_prod = val;
+ val = qed_chain_get_prod_idx(&txq->tx_pbl);
+ txq->tx_db.data.bd_prod = cpu_to_le16(val);
/* wmb makes sure that the BDs data is updated before updating the
* producer, otherwise FW may read old data from the BDs.
@@ -1351,7 +1361,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->sw_tx_ring.skbs[idx].skb = NULL;
return 0;
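/*
 * Editor's note: the qede_ethtool.c hunk above fixes the endianness
 * handling: the BD bitfields are a little-endian u16, so the value is now
 * built in host order and converted once with cpu_to_le16().  Userspace
 * sketch of the same pattern, with glibc's htole16() standing in for the
 * kernel helper (mask/shift values here are invented):
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define PKT_LEN_MASK	0x3fff	/* hypothetical field layout */
#define PKT_LEN_SHIFT	0

int main(void)
{
	uint16_t le_bitfields = 0;	/* models the __le16 descriptor word */
	unsigned int skb_len = 1514;
	uint16_t val;

	val = (skb_len & PKT_LEN_MASK) << PKT_LEN_SHIFT;
	le_bitfields |= htole16(val);	/* single, explicit byte-swap point */

	printf("bitfields on the wire: 0x%04x\n", (unsigned int)le_bitfields);
	return 0;
}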
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 333876c19d7d..13955a3bd3b3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -495,12 +495,16 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
{
struct qede_dev *edev = dev;
+ __qede_lock(edev);
+
/* MAC hints take effect only if we haven't set one already */
- if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+ if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
+ __qede_unlock(edev);
return;
+ }
ether_addr_copy(edev->ndev->dev_addr, mac);
- ether_addr_copy(edev->primary_mac, mac);
+ __qede_unlock(edev);
}
void qede_fill_rss_params(struct qede_dev *edev,
@@ -1061,41 +1065,51 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
{
struct qede_dev *edev = netdev_priv(ndev);
struct sockaddr *addr = p;
- int rc;
-
- ASSERT_RTNL(); /* @@@TBD To be removed */
+ int rc = 0;
- DP_INFO(edev, "Set_mac_addr called\n");
+ /* Make sure the state doesn't transition while changing the MAC.
+ * Also, all flows accessing the dev_addr field are doing that under
+ * this lock.
+ */
+ __qede_lock(edev);
if (!is_valid_ether_addr(addr->sa_data)) {
DP_NOTICE(edev, "The MAC address is not valid\n");
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
- DP_NOTICE(edev, "qed prevents setting MAC\n");
- return -EINVAL;
+ DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
+ addr->sa_data);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state == QEDE_STATE_OPEN) {
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ ndev->dev_addr);
+ if (rc)
+ goto out;
}
ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
- if (!netif_running(ndev)) {
- DP_NOTICE(edev, "The device is currently down\n");
- return 0;
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "The device is currently down\n");
+ goto out;
}
- /* Remove the previous primary mac */
- rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
- edev->primary_mac);
- if (rc)
- return rc;
-
- edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+ edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
- /* Add MAC filter according to the new unicast HW MAC address */
- ether_addr_copy(edev->primary_mac, ndev->dev_addr);
- return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
- edev->primary_mac);
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ ndev->dev_addr);
+out:
+ __qede_unlock(edev);
+ return rc;
}
static int
@@ -1200,7 +1214,7 @@ void qede_config_rx_mode(struct net_device *ndev)
* (configure / leave the primary MAC)
*/
rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
- edev->primary_mac);
+ edev->ndev->dev_addr);
if (rc)
goto out;
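/*
 * Editor's note: qede_set_mac_addr() above now takes __qede_lock() for the
 * whole operation and funnels every exit through one unlock.  Standalone
 * sketch of that lock/goto-out shape, with a pthread mutex standing in for
 * the qede lock (model_* names are invented):
 */
#include <pthread.h>
#include <errno.h>
#include <stddef.h>

static pthread_mutex_t model_lock = PTHREAD_MUTEX_INITIALIZER;
static int model_dev_open;		/* 0: device is down */

static int model_set_mac(const unsigned char *mac)
{
	int rc = 0;

	pthread_mutex_lock(&model_lock);
	if (!mac) {			/* validation failure */
		rc = -EFAULT;
		goto out;
	}
	if (!model_dev_open)		/* down: remember it, program later */
		goto out;
	/* ... reprogram the unicast filter while still holding the lock ... */
out:
	pthread_mutex_unlock(&model_lock);
	return rc;
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	return model_set_mac(mac) ? 1 : 0;
}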
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 7b6f41d06245..892eb98290f6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -99,7 +99,7 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
- u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ u16 idx = txq->sw_tx_cons;
struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_1st_bd *first_bd;
struct eth_tx_bd *tx_data_bd;
@@ -156,7 +156,7 @@ static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
struct eth_tx_1st_bd *first_bd,
int nbd, bool data_split)
{
- u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ u16 idx = txq->sw_tx_prod;
struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
struct eth_tx_bd *tx_data_bd;
int i, split_bd_len = 0;
@@ -333,8 +333,9 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
struct sw_rx_data *metadata, u16 padding, u16 length)
{
struct qede_tx_queue *txq = fp->xdp_tx;
- u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
struct eth_tx_1st_bd *first_bd;
+ u16 idx = txq->sw_tx_prod;
+ u16 val;
if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
txq->stopped_cnt++;
@@ -346,9 +347,11 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
memset(first_bd, 0, sizeof(*first_bd));
first_bd->data.bd_flags.bitfields =
BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
- first_bd->data.bitfields |=
- (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
- ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+ val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+ first_bd->data.bitfields |= cpu_to_le16(val);
first_bd->data.nbds = 1;
/* We can safely ignore the offset, as it's 0 for XDP */
@@ -363,7 +366,7 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
txq->sw_tx_ring.xdp[idx].page = metadata->data;
txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* Mark the fastpath for future XDP doorbell */
fp->xdp_xmit = 1;
@@ -393,14 +396,14 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
qed_chain_consume(&txq->tx_pbl);
- idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_cons;
dma_unmap_page(&edev->pdev->dev,
txq->sw_tx_ring.xdp[idx].mapping,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(txq->sw_tx_ring.xdp[idx].page);
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->xmit_pkts++;
}
}
@@ -430,7 +433,7 @@ static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
bytes_compl += len;
pkts_compl++;
- txq->sw_tx_cons++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
txq->xmit_pkts++;
}
@@ -1424,7 +1427,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct eth_tx_2nd_bd *second_bd = NULL;
struct eth_tx_3rd_bd *third_bd = NULL;
struct eth_tx_bd *tx_data_bd = NULL;
- u16 txq_index;
+ u16 txq_index, val = 0;
u8 nbd = 0;
dma_addr_t mapping;
int rc, frag_idx = 0, ipv6_ext = 0;
@@ -1455,7 +1458,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
#endif
/* Fill the entry in the SW ring and the BDs in the FW ring */
- idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ idx = txq->sw_tx_prod;
txq->sw_tx_ring.skbs[idx].skb = skb;
first_bd = (struct eth_tx_1st_bd *)
qed_chain_produce(&txq->tx_pbl);
@@ -1513,8 +1516,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (xmit_type & XMIT_ENC) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- first_bd->data.bitfields |=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
}
/* Legacy FW had flipped behavior in regard to this bit -
@@ -1522,8 +1525,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* packets when it didn't need to.
*/
if (unlikely(txq->is_legacy))
- first_bd->data.bitfields ^=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass a few params, since the device cracker doesn't
@@ -1587,11 +1589,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data_split = true;
}
} else {
- first_bd->data.bitfields |=
- (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
- ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}
+ first_bd->data.bitfields = cpu_to_le16(val);
+
/* Handle fragmented skb */
/* special handle for frags inside 2nd and 3rd bds.. */
while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
@@ -1639,7 +1642,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Advance packet producer only before sending the packet since mapping
* of pages may fail.
*/
- txq->sw_tx_prod++;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
/* 'next page' entries are counted in the producer value */
txq->tx_db.data.bd_prod =
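/*
 * Editor's note: every sw_tx_prod/sw_tx_cons update in the qede_fp.c hunks
 * above moves from the NUM_TX_BDS_MAX power-of-two mask to a modulo over
 * num_tx_buffers, which after this series may be any size.  Standalone
 * demonstration of why the mask idiom breaks for non-power-of-two rings:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint16_t num_bufs = 100;		/* not a power of two */
	uint16_t prod = 99;
	uint16_t masked;

	/* modulo wrap always lands back inside [0, num_bufs) */
	prod = (prod + 1) % num_bufs;
	assert(prod == 0);

	/* the old mask idiom assumed a power-of-two ring size */
	masked = (99 + 1) & (128 - 1);		/* 100: out of range */
	assert(masked >= num_bufs);
	return 0;
}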
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 38b77bbfe4ee..00c70625f8a4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -259,7 +259,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
/* Notify qed of the name change */
if (!edev->ops || !edev->ops->common)
goto done;
- edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
+ edev->ops->common->set_name(edev->cdev, edev->ndev->name);
break;
case NETDEV_CHANGEADDR:
edev = netdev_priv(ndev);
@@ -618,6 +618,12 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, info, sizeof(*info));
+ /* As ethtool doesn't have the ability to show WoL behavior as
+ * 'default', if the device supports it, declare it enabled.
+ */
+ if (edev->dev_info.common.wol_support)
+ edev->wol_enabled = true;
+
INIT_LIST_HEAD(&edev->vlan_list);
return edev;
@@ -846,6 +852,43 @@ static void qede_update_pf_params(struct qed_dev *cdev)
qed_ops->common->update_pf_params(cdev, &pf_params);
}
+#define QEDE_FW_VER_STR_SIZE 80
+
+static void qede_log_probe(struct qede_dev *edev)
+{
+ struct qed_dev_info *p_dev_info = &edev->dev_info.common;
+ char buf[QEDE_FW_VER_STR_SIZE];
+ size_t left_size;
+
+ snprintf(buf, QEDE_FW_VER_STR_SIZE,
+ "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
+ p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
+ p_dev_info->fw_eng,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
+ QED_MFW_VERSION_3_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
+ QED_MFW_VERSION_2_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
+ QED_MFW_VERSION_1_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
+ QED_MFW_VERSION_0_OFFSET);
+
+ left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
+ if (p_dev_info->mbi_version && left_size)
+ snprintf(buf + strlen(buf), left_size,
+ " [MBI %d.%d.%d]",
+ (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
+ QED_MBI_VERSION_2_OFFSET,
+ (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
+ QED_MBI_VERSION_1_OFFSET,
+ (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
+ QED_MBI_VERSION_0_OFFSET);
+
+ pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
+ PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
+ buf, edev->ndev->name);
+}
+
enum qede_probe_mode {
QEDE_PROBE_NORMAL,
};
@@ -924,7 +967,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err4;
}
- edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+ edev->ops->common->set_name(cdev, edev->ndev->name);
/* PTP not supported on VFs */
if (!is_vf)
@@ -939,8 +982,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
- DP_INFO(edev, "Ending successfully qede probe\n");
-
+ qede_log_probe(edev);
return 0;
err4:
@@ -1066,12 +1108,15 @@ static int qede_set_num_queues(struct qede_dev *edev)
return rc;
}
-static void qede_free_mem_sb(struct qede_dev *edev,
- struct qed_sb_info *sb_info)
+static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
+ u16 sb_id)
{
- if (sb_info->sb_virt)
+ if (sb_info->sb_virt) {
+ edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
+ memset(sb_info, 0, sizeof(*sb_info));
+ }
}
/* This function allocates fast-path status block memory */
@@ -1298,12 +1343,12 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* Allocate the parallel driver ring for Tx buffers */
if (txq->is_xdp) {
- size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+ size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring.xdp)
goto err;
} else {
- size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+ size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring.skbs)
goto err;
@@ -1313,7 +1358,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- TX_RING_SIZE,
+ txq->num_tx_buffers,
sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
@@ -1328,7 +1373,7 @@ err:
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
- qede_free_mem_sb(edev, fp->sb_info);
+ qede_free_mem_sb(edev, fp->sb_info, fp->id);
if (fp->type & QEDE_FASTPATH_RX)
qede_free_mem_rxq(edev, fp->rxq);
@@ -1890,9 +1935,10 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
if (!is_locked)
__qede_lock(edev);
- qede_roce_dev_event_close(edev);
edev->state = QEDE_STATE_CLOSED;
+ qede_roce_dev_event_close(edev);
+
/* Close OS Tx */
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
@@ -1988,9 +2034,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
- /* Add primary mac and set Rx filters */
- ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
-
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
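/*
 * Editor's note: qede_log_probe() above appends the optional MBI version by
 * sizing a second snprintf() with whatever the first one left over.
 * Standalone sketch of that append-with-remaining-space idiom (the version
 * numbers here are made up):
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[80];
	size_t left;

	snprintf(buf, sizeof(buf), "Storm FW %d.%d.%d.%d", 8, 15, 3, 0);

	left = sizeof(buf) - strlen(buf);
	if (left)	/* only append while room remains */
		snprintf(buf + strlen(buf), left, " [MBI %d.%d.%d]", 1, 2, 3);

	puts(buf);
	return 0;
}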
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 24f06e2ef43e..9b2280badaf7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -244,6 +244,7 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
rx_filter = QED_PTP_FILTER_ALL;
break;
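/*
 * Editor's note: this series adds HWTSTAMP_FILTER_NTP_ALL to existing
 * rx-filter switches here and in the ef10, stmmac, cpsw and tilegx hunks
 * below.  Hardware without a dedicated NTP filter either widens to
 * timestamping everything or returns an error; a standalone model of the
 * widening policy used by qede:
 */
#include <stdio.h>

enum model_filter { F_NONE, F_ALL, F_SOME, F_NTP_ALL };

static enum model_filter model_pick_rx_filter(enum model_filter req)
{
	switch (req) {
	case F_ALL:
	case F_SOME:
	case F_NTP_ALL:		/* no NTP-only mode: fall back to "all" */
		return F_ALL;
	default:
		return F_NONE;
	}
}

int main(void)
{
	/* an NTP_ALL request is honoured by timestamping every packet */
	printf("%d\n", model_pick_rx_filter(F_NTP_ALL) == F_ALL);
	return 0;
}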
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
index f00657ce7c8f..c0030fb8d842 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -221,8 +221,8 @@ static void qede_roce_changeaddr(struct qede_dev *edev)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}
-struct qede_roce_event_work *qede_roce_get_free_event_node(struct qede_dev
- *edev)
+static struct qede_roce_event_work *
+qede_roce_get_free_event_node(struct qede_dev *edev)
{
struct qede_roce_event_work *event_node = NULL;
struct list_head *list_node = NULL;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 1e594351a60f..89831adb8eb7 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1418,8 +1418,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->tx_enable_tstamp(first_desc);
}
- if (!tqueue->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
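/*
 * Editor's note: sxgbe here (and stmmac later in this series) drop the
 * "!hwts_tx_en" gate around skb_tx_timestamp().  The core helper is already
 * a no-op unless the skb asked for a software timestamp, so the gate only
 * suppressed software stamps whenever hardware stamping was enabled.
 * Rough standalone model of the helper's internal guard:
 */
#include <stdio.h>

#define MODEL_SKBTX_SW_TSTAMP	0x2	/* mirrors the kernel's 1 << 1 bit */

struct model_skb { unsigned int tx_flags; };

static void model_skb_tx_timestamp(struct model_skb *skb)
{
	if (!(skb->tx_flags & MODEL_SKBTX_SW_TSTAMP))
		return;			/* nothing requested: no-op */
	puts("software timestamp taken");
}

int main(void)
{
	struct model_skb skb = { .tx_flags = 0 };

	model_skb_tx_timestamp(&skb);	/* silent: flag not set */
	skb.tx_flags = MODEL_SKBTX_SW_TSTAMP;
	model_skb_tx_timestamp(&skb);	/* prints once */
	return 0;
}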
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 78efb2822b86..ad9c4ded2b90 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6068,6 +6068,7 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
init->rx_filter = HWTSTAMP_FILTER_ALL;
rc = efx_ptp_change_mode(efx, true, 0);
if (!rc)
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b7ce3fbb5375..e82b4b70b7be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -549,9 +549,11 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
- int port;
- int duplex;
- int speed;
+ u32 speed_mask;
+ u32 speed10;
+ u32 speed100;
+ u32 speed1000;
+ u32 duplex;
};
struct mii_regs {
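/*
 * Editor's note: the new struct mac_link above replaces the port/speed
 * control bits with one register value per speed plus a speed_mask that
 * covers them all, so every MAC core can share a single clear-then-set
 * sequence.  Standalone sketch (the bit positions below are invented):
 */
#include <assert.h>
#include <stdint.h>

struct model_link {
	uint32_t speed_mask, speed10, speed100, speed1000;
};

int main(void)
{
	const struct model_link l = {
		.speed_mask = (1u << 15) | (1u << 14),	/* PS | FES style */
		.speed10    = (1u << 15),
		.speed100   = (1u << 15) | (1u << 14),
		.speed1000  = 0,
	};
	uint32_t ctrl = l.speed10;	/* link previously forced to 10M */

	ctrl &= ~l.speed_mask;		/* clear all speed bits first ... */
	ctrl |= l.speed1000;		/* ... then set the new speed */
	assert((ctrl & l.speed_mask) == 0);
	return 0;
}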
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index f3d9305e5f70..8a86340ff2d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -45,15 +45,17 @@ static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
if (hw->ps) {
value |= GMAC_CONTROL_TE;
- if (hw->ps == SPEED_1000) {
- value &= ~GMAC_CONTROL_PS;
- } else {
- value |= GMAC_CONTROL_PS;
-
- if (hw->ps == SPEED_10)
- value &= ~GMAC_CONTROL_FES;
- else
- value |= GMAC_CONTROL_FES;
+ value &= ~hw->link.speed_mask;
+ switch (hw->ps) {
+ case SPEED_1000:
+ value |= hw->link.speed1000;
+ break;
+ case SPEED_100:
+ value |= hw->link.speed100;
+ break;
+ case SPEED_10:
+ value |= hw->link.speed10;
+ break;
}
}
@@ -531,9 +533,11 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->mac = &dwmac1000_ops;
mac->dma = &dwmac1000_dma_ops;
- mac->link.port = GMAC_CONTROL_PS;
mac->link.duplex = GMAC_CONTROL_DM;
- mac->link.speed = GMAC_CONTROL_FES;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
mac->mii.addr = GMAC_MII_ADDR;
mac->mii.data = GMAC_MII_DATA;
mac->mii.addr_shift = 11;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 1b3609105484..8ef517356313 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -175,9 +175,11 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
mac->mac = &dwmac100_ops;
mac->dma = &dwmac100_dma_ops;
- mac->link.port = MAC_CONTROL_PS;
mac->link.duplex = MAC_CONTROL_F;
- mac->link.speed = 0;
+ mac->link.speed10 = 0;
+ mac->link.speed100 = 0;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = MAC_CONTROL_PS;
mac->mii.addr = MAC_MII_ADDR;
mac->mii.data = MAC_MII_DATA;
mac->mii.addr_shift = 11;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 48793f2e9307..f233bf8b4ebb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -35,15 +35,17 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
if (hw->ps) {
value |= GMAC_CONFIG_TE;
- if (hw->ps == SPEED_1000) {
- value &= ~GMAC_CONFIG_PS;
- } else {
- value |= GMAC_CONFIG_PS;
-
- if (hw->ps == SPEED_10)
- value &= ~GMAC_CONFIG_FES;
- else
- value |= GMAC_CONFIG_FES;
+ value &= ~hw->link.speed_mask;
+ switch (hw->ps) {
+ case SPEED_1000:
+ value |= hw->link.speed1000;
+ break;
+ case SPEED_100:
+ value |= hw->link.speed100;
+ break;
+ case SPEED_10:
+ value |= hw->link.speed10;
+ break;
}
}
@@ -747,9 +749,11 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- mac->link.port = GMAC_CONFIG_PS;
mac->link.duplex = GMAC_CONFIG_DM;
- mac->link.speed = GMAC_CONFIG_FES;
+ mac->link.speed10 = GMAC_CONFIG_PS;
+ mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
mac->mii.addr = GMAC_MDIO_ADDR;
mac->mii.data = GMAC_MDIO_DATA;
mac->mii.addr_shift = 21;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 33efe7038cab..a916e13624eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -104,7 +104,7 @@ struct stmmac_priv {
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- int oldlink;
+ bool oldlink;
int speed;
int oldduplex;
unsigned int flow_ctrl;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a74c481401c4..f158273eab9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -644,6 +644,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
ptp_over_ethernet = PTP_TCR_TSIPENA;
break;
+ case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
config.rx_filter = HWTSTAMP_FILTER_ALL;
@@ -774,7 +775,7 @@ static void stmmac_adjust_link(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
unsigned long flags;
- int new_state = 0;
+ bool new_state = false;
if (!phydev)
return;
@@ -787,8 +788,8 @@ static void stmmac_adjust_link(struct net_device *dev)
/* Now we make sure that we can be in full duplex mode.
* If not, we operate in half-duplex mode. */
if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
+ new_state = true;
+ if (!phydev->duplex)
ctrl &= ~priv->hw->link.duplex;
else
ctrl |= priv->hw->link.duplex;
@@ -799,30 +800,17 @@ static void stmmac_adjust_link(struct net_device *dev)
stmmac_mac_flow_ctrl(priv, phydev->duplex);
if (phydev->speed != priv->speed) {
- new_state = 1;
+ new_state = true;
+ ctrl &= ~priv->hw->link.speed_mask;
switch (phydev->speed) {
- case 1000:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4)
- ctrl &= ~priv->hw->link.port;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
break;
- case 100:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4) {
- ctrl |= priv->hw->link.port;
- ctrl |= priv->hw->link.speed;
- } else {
- ctrl &= ~priv->hw->link.port;
- }
+ case SPEED_100:
+ ctrl |= priv->hw->link.speed100;
break;
- case 10:
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4) {
- ctrl |= priv->hw->link.port;
- ctrl &= ~(priv->hw->link.speed);
- } else {
- ctrl &= ~priv->hw->link.port;
- }
+ case SPEED_10:
+ ctrl |= priv->hw->link.speed10;
break;
default:
netif_warn(priv, link, priv->dev,
@@ -838,12 +826,12 @@ static void stmmac_adjust_link(struct net_device *dev)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
+ new_state = true;
+ priv->oldlink = true;
}
} else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
+ new_state = true;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
}
@@ -906,7 +894,7 @@ static int stmmac_init_phy(struct net_device *dev)
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
int max_speed = priv->plat->max_speed;
- priv->oldlink = 0;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
@@ -2879,8 +2867,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
- if (!priv->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
@@ -3083,8 +3070,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
- if (!priv->hwts_tx_en)
- skb_tx_timestamp(skb);
+ skb_tx_timestamp(skb);
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
@@ -4292,7 +4278,7 @@ int stmmac_suspend(struct device *dev)
}
spin_unlock_irqrestore(&priv->lock, flags);
- priv->oldlink = 0;
+ priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
priv->oldduplex = DUPLEX_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f4d7aec50479..37fc16521143 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1734,6 +1734,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
return -ERANGE;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 7c634bc75615..aec95382ea5c 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -512,6 +512,7 @@ static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 25fd3b04b3c0..7a218549c80a 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -912,7 +912,7 @@ static int ca8210_spi_transfer(
)
{
int i, status = 0;
- struct ca8210_priv *priv = spi_get_drvdata(spi);
+ struct ca8210_priv *priv;
struct cas_control *cas_ctl;
if (!spi) {
@@ -923,6 +923,7 @@ static int ca8210_spi_transfer(
return -ENODEV;
}
+ priv = spi_get_drvdata(spi);
reinit_completion(&priv->spi_transfer_complete);
dev_dbg(&spi->dev, "ca8210_spi_transfer called\n");
@@ -1808,10 +1809,9 @@ static int ca8210_skb_rx(
/* Allocate an MTU-sized buffer for every rx packet */
skb = dev_alloc_skb(IEEE802154_MTU + sizeof(hdr));
- if (!skb) {
- dev_crit(&priv->spi->dev, "dev_alloc_skb failed\n");
+ if (!skb)
return -ENOMEM;
- }
+
skb_reserve(skb, sizeof(hdr));
msdulen = data_ind[22]; /* msdu_length */
@@ -3143,10 +3143,6 @@ static int ca8210_probe(struct spi_device *spi_device)
pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
- dev_crit(
- &spi_device->dev,
- "Could not allocate platform data\n"
- );
ret = -ENOMEM;
goto error;
}
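/*
 * Editor's note: the first ca8210 hunk above moves spi_get_drvdata() below
 * the NULL check, since the old initializer dereferenced spi before it was
 * validated.  The general shape, as a standalone sketch (model_* names are
 * invented):
 */
#include <errno.h>
#include <stddef.h>

struct model_spi { void *drvdata; };

static int model_transfer(struct model_spi *spi)
{
	void *priv;

	if (!spi)		/* validate the handle first ... */
		return -ENODEV;

	priv = spi->drvdata;	/* ... and only then dereference it */
	(void)priv;
	return 0;
}

int main(void)
{
	return model_transfer(NULL) == -ENODEV ? 0 : 1;
}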
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index cdc347be68f2..91642fd87cd1 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -588,8 +588,6 @@ static void count_tx(struct net_device *dev, int ret, int len)
stats->tx_packets++;
stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
- } else {
- dev->stats.tx_dropped++;
}
}
@@ -883,7 +881,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
struct macsec_dev *macsec = macsec_priv(dev);
struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
struct macsec_rx_sc *rx_sc = rx_sa->sc;
- int len, ret;
+ int len;
u32 pn;
aead_request_free(macsec_skb_cb(skb)->req);
@@ -904,11 +902,8 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_reset_skb(skb, macsec->secy.netdev);
len = skb->len;
- ret = gro_cells_receive(&macsec->gro_cells, skb);
- if (ret == NET_RX_SUCCESS)
+ if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
count_rx(dev, len);
- else
- macsec->secy.netdev->stats.rx_dropped++;
rcu_read_unlock_bh();
@@ -1037,7 +1032,6 @@ static void handle_not_macsec(struct sk_buff *skb)
*/
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
- int ret;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
@@ -1054,13 +1048,10 @@ static void handle_not_macsec(struct sk_buff *skb)
nskb->dev = macsec->secy.netdev;
- ret = netif_rx(nskb);
- if (ret == NET_RX_SUCCESS) {
+ if (netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
- } else {
- macsec->secy.netdev->stats.rx_dropped++;
}
}
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index a32dc5d11e89..1e9ad30a35c8 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -540,7 +540,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -551,7 +551,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -562,7 +562,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -573,7 +573,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -584,7 +584,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -595,7 +595,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -606,7 +606,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -617,7 +617,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.read_status = genphy_read_status,
@@ -628,7 +628,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.read_status = genphy_read_status,
@@ -639,7 +639,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm5482_config_init,
.config_aneg = genphy_config_aneg,
.read_status = bcm5482_read_status,
@@ -650,7 +650,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -661,7 +661,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -672,7 +672,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -683,7 +683,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -694,7 +694,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 9097e42bec2e..1a72bebc588a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -41,6 +41,12 @@
#include <linux/uaccess.h>
#define MII_MARVELL_PHY_PAGE 22
+#define MII_MARVELL_COPPER_PAGE 0x00
+#define MII_MARVELL_FIBER_PAGE 0x01
+#define MII_MARVELL_MSCR_PAGE 0x02
+#define MII_MARVELL_LED_PAGE 0x03
+#define MII_MARVELL_MISC_TEST_PAGE 0x06
+#define MII_MARVELL_WOL_PAGE 0x11
#define MII_M1011_IEVENT 0x13
#define MII_M1011_IEVENT_CLEAR 0x0000
@@ -54,7 +60,6 @@
#define MII_M1011_PHY_SCR_MDI_X 0x0020
#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060
-#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16
#define MII_M1145_PHY_EXT_SR 0x1b
#define MII_M1145_PHY_EXT_CR 0x14
#define MII_M1145_RGMII_RX_DELAY 0x0080
@@ -83,10 +88,6 @@
#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
-#define MII_M1111_COPPER 0
-#define MII_M1111_FIBER 1
-
-#define MII_88E1121_PHY_MSCR_PAGE 2
#define MII_88E1121_PHY_MSCR_REG 21
#define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5)
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
@@ -112,7 +113,6 @@
#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7)
/* LED Timer Control Register */
-#define MII_88E1318S_PHY_LED_PAGE 0x03
#define MII_88E1318S_PHY_LED_TCR 0x12
#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15)
#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7)
@@ -123,13 +123,11 @@
#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18
#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19
-#define MII_88E1318S_PHY_WOL_PAGE 0x11
#define MII_88E1318S_PHY_WOL_CTRL 0x10
#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
#define MII_88E1121_PHY_LED_CTRL 16
-#define MII_88E1121_PHY_LED_PAGE 3
#define MII_88E1121_PHY_LED_DEF 0x0030
#define MII_M1011_PHY_STATUS 0x11
@@ -189,6 +187,29 @@ struct marvell_priv {
struct device *hwmon_dev;
};
+static int marvell_get_page(struct phy_device *phydev)
+{
+ return phy_read(phydev, MII_MARVELL_PHY_PAGE);
+}
+
+static int marvell_set_page(struct phy_device *phydev, int page)
+{
+ return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
+}
+
+static int marvell_get_set_page(struct phy_device *phydev, int page)
+{
+ int oldpage = marvell_get_page(phydev);
+
+ if (oldpage < 0)
+ return oldpage;
+
+ if (page != oldpage)
+ return marvell_set_page(phydev, page);
+
+ return 0;
+}
+
static int marvell_ack_interrupt(struct phy_device *phydev)
{
int err;
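/*
 * Editor's note: the marvell_{get,set,get_set}_page() helpers just added
 * are used below in a save/switch/restore idiom.  A standalone sketch of
 * the idiom (simplified: this model returns the previous page, while the
 * driver helper returns 0 or the page-write status):
 */
#include <assert.h>

static int model_cur_page;		/* models the PHY page register */

static int model_get_set_page(int page)
{
	int oldpage = model_cur_page;

	if (page != oldpage)
		model_cur_page = page;
	return oldpage;
}

int main(void)
{
	int oldpage = model_get_set_page(2);	/* switch to the MSCR page */

	/* ... page-2 register accesses would go here ... */
	model_get_set_page(oldpage);		/* restore on the way out */
	assert(model_cur_page == 0);
	return 0;
}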
@@ -207,9 +228,11 @@ static int marvell_config_intr(struct phy_device *phydev)
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+ err = phy_write(phydev, MII_M1011_IMASK,
+ MII_M1011_IMASK_INIT);
else
- err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
+ err = phy_write(phydev, MII_M1011_IMASK,
+ MII_M1011_IMASK_CLEAR);
return err;
}
@@ -271,8 +294,7 @@ static int marvell_config_aneg(struct phy_device *phydev)
if (phydev->autoneg != AUTONEG_ENABLE) {
int bmcr;
- /*
- * A write to speed/duplex bits (that is performed by
+ /* A write to speed/duplex bits (that is performed by
* genphy_config_aneg() call above) must be followed by
* a software reset. Otherwise, the write has no effect.
*/
@@ -367,8 +389,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
}
#ifdef CONFIG_OF_MDIO
-/*
- * Set and/or override some configuration registers based on the
+/* Set and/or override some configuration registers based on the
* marvell,reg-init property stored in the of_node for the phydev.
*
* marvell,reg-init = <reg-page reg mask value>,...;
@@ -394,7 +415,7 @@ static int marvell_of_reg_init(struct phy_device *phydev)
if (!paddr || len < (4 * sizeof(*paddr)))
return 0;
- saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ saved_page = marvell_get_page(phydev);
if (saved_page < 0)
return saved_page;
current_page = saved_page;
@@ -402,15 +423,15 @@ static int marvell_of_reg_init(struct phy_device *phydev)
ret = 0;
len /= sizeof(*paddr);
for (i = 0; i < len - 3; i += 4) {
- u16 reg_page = be32_to_cpup(paddr + i);
+ u16 page = be32_to_cpup(paddr + i);
u16 reg = be32_to_cpup(paddr + i + 1);
u16 mask = be32_to_cpup(paddr + i + 2);
u16 val_bits = be32_to_cpup(paddr + i + 3);
int val;
- if (reg_page != current_page) {
- current_page = reg_page;
- ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
+ if (page != current_page) {
+ current_page = page;
+ ret = marvell_set_page(phydev, page);
if (ret < 0)
goto err;
}
@@ -429,11 +450,10 @@ static int marvell_of_reg_init(struct phy_device *phydev)
ret = phy_write(phydev, reg, val);
if (ret < 0)
goto err;
-
}
err:
if (current_page != saved_page) {
- i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
+ i = marvell_set_page(phydev, saved_page);
if (ret == 0)
ret = i;
}
@@ -450,15 +470,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1121_PHY_MSCR_PAGE);
- if (err < 0)
- return err;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
+ if (oldpage < 0)
+ return oldpage;
if (phy_interface_is_rgmii(phydev)) {
-
mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
MII_88E1121_PHY_MSCR_DELAY_MASK;
@@ -475,7 +491,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ marvell_set_page(phydev, oldpage);
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -493,12 +509,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1121_PHY_MSCR_PAGE);
- if (err < 0)
- return err;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
+ if (oldpage < 0)
+ return oldpage;
mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
@@ -507,7 +520,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ err = marvell_set_page(phydev, oldpage);
if (err < 0)
return err;
@@ -607,7 +620,7 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
goto error;
@@ -617,7 +630,7 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
goto error;
/* Then the fiber link */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -625,10 +638,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
if (err < 0)
goto error;
- return phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ return marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
error:
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
return err;
}
@@ -651,7 +664,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
mdelay(500);
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -663,7 +676,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 2);
+ err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE);
if (err < 0)
return err;
temp = phy_read(phydev, MII_M1116R_CONTROL_REG_MAC);
@@ -672,7 +685,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
err = phy_write(phydev, MII_M1116R_CONTROL_REG_MAC, temp);
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -706,103 +719,129 @@ static int m88e3016_config_init(struct phy_device *phydev)
return marvell_config_init(phydev);
}
-static int m88e1111_config_init(struct phy_device *phydev)
+static int m88e1111_config_init_rgmii(struct phy_device *phydev)
{
int err;
int temp;
- if (phy_interface_is_rgmii(phydev)) {
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (temp < 0)
+ return temp;
- temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
- if (temp < 0)
- return temp;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+ temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
+ } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ temp &= ~MII_M1111_TX_DELAY;
+ temp |= MII_M1111_RX_DELAY;
+ } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ temp &= ~MII_M1111_RX_DELAY;
+ temp |= MII_M1111_TX_DELAY;
+ }
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
- } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- temp &= ~MII_M1111_TX_DELAY;
- temp |= MII_M1111_RX_DELAY;
- } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- temp &= ~MII_M1111_RX_DELAY;
- temp |= MII_M1111_TX_DELAY;
- }
+ err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
- err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
- if (err < 0)
- return err;
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK);
- temp &= ~(MII_M1111_HWCFG_MODE_MASK);
+ if (temp & MII_M1111_HWCFG_FIBER_COPPER_RES)
+ temp |= MII_M1111_HWCFG_MODE_FIBER_RGMII;
+ else
+ temp |= MII_M1111_HWCFG_MODE_COPPER_RGMII;
- if (temp & MII_M1111_HWCFG_FIBER_COPPER_RES)
- temp |= MII_M1111_HWCFG_MODE_FIBER_RGMII;
- else
- temp |= MII_M1111_HWCFG_MODE_COPPER_RGMII;
+ return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+}
- err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
- if (err < 0)
- return err;
- }
+static int m88e1111_config_init_sgmii(struct phy_device *phydev)
+{
+ int err;
+ int temp;
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
- temp &= ~(MII_M1111_HWCFG_MODE_MASK);
- temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK;
- temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK);
+ temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK;
+ temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
- if (err < 0)
- return err;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
- /* make sure copper is selected */
- err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE);
- if (err < 0)
- return err;
+ /* make sure copper is selected */
+ return marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+}
- err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE,
- err & (~0xff));
- if (err < 0)
- return err;
- }
+static int m88e1111_config_init_rtbi(struct phy_device *phydev)
+{
+ int err;
+ int temp;
- if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
- temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
- if (temp < 0)
- return temp;
- temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
- err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
- if (err < 0)
- return err;
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (temp < 0)
+ return temp;
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
- temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
- temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
- if (err < 0)
+ temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
+ err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+
+ /* soft reset */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+
+ do
+ temp = phy_read(phydev, MII_BMCR);
+ while (temp & BMCR_RESET);
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI |
+ MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+
+ return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+}
+
+static int m88e1111_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ err = m88e1111_config_init_rgmii(phydev);
+ if (err)
return err;
+ }
- /* soft reset */
- err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ err = m88e1111_config_init_sgmii(phydev);
if (err < 0)
return err;
- do
- temp = phy_read(phydev, MII_BMCR);
- while (temp & BMCR_RESET);
+ }
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- if (temp < 0)
- return temp;
- temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
- temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
+ err = m88e1111_config_init_rtbi(phydev);
if (err < 0)
return err;
}
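
The split above turns one monolithic m88e1111_config_init() into per-interface helpers that all follow the same read, check, mask, write shape. A minimal user-space sketch of that read-modify-write contract; the register and bit names here are placeholders, not the real MII map:

#include <stdio.h>

#define RX_DELAY 0x0080
#define TX_DELAY 0x0002

static int fake_reg;

static int phy_read_sim(void)     { return fake_reg; }   /* or a negative errno */
static int phy_write_sim(int val) { fake_reg = val; return 0; }

static int set_rgmii_delays(int rx, int tx)
{
	int val = phy_read_sim();

	if (val < 0)
		return val;                /* propagate read errors */

	val &= ~(RX_DELAY | TX_DELAY);     /* clear both delay bits */
	if (rx)
		val |= RX_DELAY;
	if (tx)
		val |= TX_DELAY;

	return phy_write_sim(val);         /* 0 on success */
}

int main(void)
{
	set_rgmii_delays(1, 0);
	printf("reg = 0x%04x\n", fake_reg);    /* reg = 0x0080 */
	return 0;
}
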
@@ -818,11 +857,9 @@ static int m88e1121_config_init(struct phy_device *phydev)
{
int err, oldpage;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
- if (err < 0)
- return err;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE);
+ if (oldpage < 0)
+ return oldpage;
/* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
@@ -830,7 +867,7 @@ static int m88e1121_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ marvell_set_page(phydev, oldpage);
/* Set marvell,reg-init configuration from device tree */
return marvell_config_init(phydev);
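
marvell_get_set_page(), paired with marvell_set_page() for the restore, is the recurring idiom of this patch: one call returns either the previous page or a negative error, so callers get a single check plus the value to restore on exit. A self-contained imitation, with invented page numbers:

#include <stdio.h>

static int current_page;

static int get_page_sim(void)  { return current_page; }
static int set_page_sim(int p) { current_page = p; return 0; }

static int get_set_page(int new_page)
{
	int oldpage = get_page_sim();

	if (oldpage < 0)
		return oldpage;
	if (oldpage != new_page) {
		int err = set_page_sim(new_page);

		if (err < 0)
			return err;
	}
	return oldpage;                /* caller restores this later */
}

int main(void)
{
	int oldpage = get_set_page(3); /* e.g. switch to the LED page */

	if (oldpage < 0)
		return 1;
	/* ... access registers on page 3 ... */
	set_page_sim(oldpage);         /* restore */
	printf("restored to page %d\n", current_page);
	return 0;
}
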
@@ -844,7 +881,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
/* Select page 18 */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 18);
+ err = marvell_set_page(phydev, 18);
if (err < 0)
return err;
@@ -863,7 +900,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
return err;
/* Reset page selection */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
}
@@ -893,7 +930,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
int err;
/* Change address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+ err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE);
if (err < 0)
return err;
@@ -903,7 +940,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
return err;
/* Change address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
+ err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
if (err < 0)
return err;
@@ -920,7 +957,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
return err;
/* Reset address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -932,7 +969,7 @@ static int m88e1149_config_init(struct phy_device *phydev)
int err;
/* Change address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+ err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE);
if (err < 0)
return err;
@@ -946,17 +983,70 @@ static int m88e1149_config_init(struct phy_device *phydev)
return err;
/* Reset address */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
+static int m88e1145_config_init_rgmii(struct phy_device *phydev)
+{
+ int err;
+ int temp = phy_read(phydev, MII_M1145_PHY_EXT_CR);
+
+ if (temp < 0)
+ return temp;
+
+ temp |= (MII_M1145_RGMII_RX_DELAY | MII_M1145_RGMII_TX_DELAY);
+
+ err = phy_write(phydev, MII_M1145_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ if (phydev->dev_flags & MARVELL_PHY_M1145_FLAGS_RESISTANCE) {
+ err = phy_write(phydev, 0x1d, 0x0012);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, 0x1e);
+ if (temp < 0)
+ return temp;
+
+ temp &= 0xf03f;
+ temp |= 2 << 9; /* 36 ohm */
+ temp |= 2 << 6; /* 39 ohm */
+
+ err = phy_write(phydev, 0x1e, temp);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x3);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x8000);
+ }
+ return err;
+}
+
+static int m88e1145_config_init_sgmii(struct phy_device *phydev)
+{
+ int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
+
+ if (temp < 0)
+ return temp;
+
+ temp &= ~MII_M1145_HWCFG_MODE_MASK;
+ temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
+ temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
+
+ return phy_write(phydev, MII_M1145_PHY_EXT_SR, temp);
+}
+
static int m88e1145_config_init(struct phy_device *phydev)
{
int err;
- int temp;
/* Take care of errata E0 & E1 */
err = phy_write(phydev, 0x1d, 0x001b);
@@ -976,53 +1066,13 @@ static int m88e1145_config_init(struct phy_device *phydev)
return err;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
- int temp = phy_read(phydev, MII_M1145_PHY_EXT_CR);
- if (temp < 0)
- return temp;
-
- temp |= (MII_M1145_RGMII_RX_DELAY | MII_M1145_RGMII_TX_DELAY);
-
- err = phy_write(phydev, MII_M1145_PHY_EXT_CR, temp);
+ err = m88e1145_config_init_rgmii(phydev);
if (err < 0)
return err;
-
- if (phydev->dev_flags & MARVELL_PHY_M1145_FLAGS_RESISTANCE) {
- err = phy_write(phydev, 0x1d, 0x0012);
- if (err < 0)
- return err;
-
- temp = phy_read(phydev, 0x1e);
- if (temp < 0)
- return temp;
-
- temp &= 0xf03f;
- temp |= 2 << 9; /* 36 ohm */
- temp |= 2 << 6; /* 39 ohm */
-
- err = phy_write(phydev, 0x1e, temp);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1d, 0x3);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0x8000);
- if (err < 0)
- return err;
- }
}
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
- if (temp < 0)
- return temp;
-
- temp &= ~MII_M1145_HWCFG_MODE_MASK;
- temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
- temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
-
- err = phy_write(phydev, MII_M1145_PHY_EXT_SR, temp);
+ err = m88e1145_config_init_sgmii(phydev);
if (err < 0)
return err;
}
@@ -1065,7 +1115,8 @@ static int marvell_update_link(struct phy_device *phydev, int fiber)
int status;
/* Use the generic register for copper link, or specific
- * register for fiber case */
+ * register for fiber case
+ */
if (fiber) {
status = phy_read(phydev, MII_M1011_PHY_STATUS);
if (status < 0)
@@ -1082,125 +1133,143 @@ static int marvell_update_link(struct phy_device *phydev, int fiber)
return 0;
}
-/* marvell_read_status_page
- *
- * Description:
- * Check the link, then figure out the current state
- * by comparing what we advertise with what the link partner
- * advertises. Start by checking the gigabit possibilities,
- * then move on to 10/100.
- */
-static int marvell_read_status_page(struct phy_device *phydev, int page)
+static int marvell_read_status_page_an(struct phy_device *phydev,
+ int fiber)
{
- int adv;
- int err;
+ int status;
int lpa;
int lpagb;
- int status = 0;
- int fiber;
+ int adv;
- /* Detect and update the link, but return if there
- * was an error */
- if (page == MII_M1111_FIBER)
- fiber = 1;
- else
- fiber = 0;
+ status = phy_read(phydev, MII_M1011_PHY_STATUS);
+ if (status < 0)
+ return status;
- err = marvell_update_link(phydev, fiber);
- if (err)
- return err;
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
- if (AUTONEG_ENABLE == phydev->autoneg) {
- status = phy_read(phydev, MII_M1011_PHY_STATUS);
- if (status < 0)
- return status;
+ lpagb = phy_read(phydev, MII_STAT1000);
+ if (lpagb < 0)
+ return lpagb;
- lpa = phy_read(phydev, MII_LPA);
- if (lpa < 0)
- return lpa;
+ adv = phy_read(phydev, MII_ADVERTISE);
+ if (adv < 0)
+ return adv;
- lpagb = phy_read(phydev, MII_STAT1000);
- if (lpagb < 0)
- return lpagb;
+ lpa &= adv;
- adv = phy_read(phydev, MII_ADVERTISE);
- if (adv < 0)
- return adv;
+ if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
- lpa &= adv;
+ status = status & MII_M1011_PHY_STATUS_SPD_MASK;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
- if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
+ switch (status) {
+ case MII_M1011_PHY_STATUS_1000:
+ phydev->speed = SPEED_1000;
+ break;
- status = status & MII_M1011_PHY_STATUS_SPD_MASK;
- phydev->pause = phydev->asym_pause = 0;
+ case MII_M1011_PHY_STATUS_100:
+ phydev->speed = SPEED_100;
+ break;
- switch (status) {
- case MII_M1011_PHY_STATUS_1000:
- phydev->speed = SPEED_1000;
- break;
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
- case MII_M1011_PHY_STATUS_100:
- phydev->speed = SPEED_100;
- break;
+ if (!fiber) {
+ phydev->lp_advertising =
+ mii_stat1000_to_ethtool_lpa_t(lpagb) |
+ mii_lpa_to_ethtool_lpa_t(lpa);
- default:
- phydev->speed = SPEED_10;
- break;
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
}
-
- if (!fiber) {
- phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
- mii_lpa_to_ethtool_lpa_t(lpa);
-
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
- }
- } else {
- /* The fiber link is only 1000M capable */
- phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
-
- if (phydev->duplex == DUPLEX_FULL) {
- if (!(lpa & LPA_PAUSE_FIBER)) {
- phydev->pause = 0;
- phydev->asym_pause = 0;
- } else if ((lpa & LPA_PAUSE_ASYM_FIBER)) {
- phydev->pause = 1;
- phydev->asym_pause = 1;
- } else {
- phydev->pause = 1;
- phydev->asym_pause = 0;
- }
+ } else {
+ /* The fiber link is only 1000M capable */
+ phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ if (!(lpa & LPA_PAUSE_FIBER)) {
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ } else if ((lpa & LPA_PAUSE_ASYM_FIBER)) {
+ phydev->pause = 1;
+ phydev->asym_pause = 1;
+ } else {
+ phydev->pause = 1;
+ phydev->asym_pause = 0;
}
}
- } else {
- int bmcr = phy_read(phydev, MII_BMCR);
+ }
+ return 0;
+}
- if (bmcr < 0)
- return bmcr;
+static int marvell_read_status_page_fixed(struct phy_device *phydev)
+{
+ int bmcr = phy_read(phydev, MII_BMCR);
- if (bmcr & BMCR_FULLDPLX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
+ if (bmcr < 0)
+ return bmcr;
- if (bmcr & BMCR_SPEED1000)
- phydev->speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
- phydev->speed = SPEED_100;
- else
- phydev->speed = SPEED_10;
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
- phydev->pause = phydev->asym_pause = 0;
- phydev->lp_advertising = 0;
- }
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->lp_advertising = 0;
return 0;
}
+/* marvell_read_status_page
+ *
+ * Description:
+ * Check the link, then figure out the current state
+ * by comparing what we advertise with what the link partner
+ * advertises. Start by checking the gigabit possibilities,
+ * then move on to 10/100.
+ */
+static int marvell_read_status_page(struct phy_device *phydev, int page)
+{
+ int fiber;
+ int err;
+
+ /* Detect and update the link, but return if there
+ * was an error
+ */
+ if (page == MII_MARVELL_FIBER_PAGE)
+ fiber = 1;
+ else
+ fiber = 0;
+
+ err = marvell_update_link(phydev, fiber);
+ if (err)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ err = marvell_read_status_page_an(phydev, fiber);
+ else
+ err = marvell_read_status_page_fixed(phydev);
+
+ return err;
+}
+
/* marvell_read_status
*
* Some Marvell's phys have two modes: fiber and copper.
@@ -1217,33 +1286,34 @@ static int marvell_read_status(struct phy_device *phydev)
/* Check the fiber mode first */
if (phydev->supported & SUPPORTED_FIBRE &&
phydev->interface != PHY_INTERFACE_MODE_SGMII) {
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
- err = marvell_read_status_page(phydev, MII_M1111_FIBER);
+ err = marvell_read_status_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
- /* If the fiber link is up, it is the selected and used link.
- * In this case, we need to stay in the fiber page.
- * Please to be careful about that, avoid to restore Copper page
- * in other functions which could break the behaviour
- * for some fiber phy like 88E1512.
- * */
+ /* If the fiber link is up, it is the selected and
+ * used link. In this case, we need to stay in the
+ * fiber page. Be careful not to restore the copper
+ * page in other functions, as that could break the
+ * behaviour for some fiber PHYs such as the 88E1512.
+ */
if (phydev->link)
return 0;
/* If fiber link is down, check and save copper mode state */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
goto error;
}
- return marvell_read_status_page(phydev, MII_M1111_COPPER);
+ return marvell_read_status_page(phydev, MII_MARVELL_COPPER_PAGE);
error:
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
return err;
}
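
The status-read refactor isolates the autoneg decode, whose core is a switch over the masked speed field with 10 Mb/s as the fallback for any other encoding. An illustrative stand-alone decode using placeholder bit values rather than the real MII_M1011 constants:

#include <stdio.h>

#define STATUS_SPD_MASK 0xc000
#define STATUS_1000     0x8000
#define STATUS_100      0x4000

static int decode_speed(int status)
{
	switch (status & STATUS_SPD_MASK) {
	case STATUS_1000:
		return 1000;
	case STATUS_100:
		return 100;
	default:          /* 10 Mb/s, and any reserved encoding */
		return 10;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       decode_speed(STATUS_1000),
	       decode_speed(STATUS_100),
	       decode_speed(0));      /* prints: 1000 100 10 */
	return 0;
}
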
@@ -1258,7 +1328,7 @@ static int marvell_suspend(struct phy_device *phydev)
/* Suspend the fiber mode first */
if (!(phydev->supported & SUPPORTED_FIBRE)) {
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1268,7 +1338,7 @@ static int marvell_suspend(struct phy_device *phydev)
goto error;
/* Then, the copper link */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
goto error;
}
@@ -1277,7 +1347,7 @@ static int marvell_suspend(struct phy_device *phydev)
return genphy_suspend(phydev);
error:
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
return err;
}
@@ -1292,7 +1362,7 @@ static int marvell_resume(struct phy_device *phydev)
/* Resume the fiber mode first */
if (!(phydev->supported & SUPPORTED_FIBRE)) {
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1302,7 +1372,7 @@ static int marvell_resume(struct phy_device *phydev)
goto error;
/* Then, the copper link */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
goto error;
}
@@ -1311,13 +1381,14 @@ static int marvell_resume(struct phy_device *phydev)
return genphy_resume(phydev);
error:
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
return err;
}
static int marvell_aneg_done(struct phy_device *phydev)
{
int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
+
return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
}
@@ -1333,32 +1404,33 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
return 0;
}
-static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+static void m88e1318_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
{
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- if (phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1318S_PHY_WOL_PAGE) < 0)
+ if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0)
return;
if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
wol->wolopts |= WAKE_MAGIC;
- if (phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00) < 0)
+ if (marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0)
return;
}
-static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+static int m88e1318_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
{
int err, oldpage, temp;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ oldpage = marvell_get_page(phydev);
if (wol->wolopts & WAKE_MAGIC) {
/* Explicitly switch to page 0x00, just to be sure */
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00);
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -1369,8 +1441,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *w
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1318S_PHY_LED_PAGE);
+ err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
if (err < 0)
return err;
@@ -1383,8 +1454,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *w
if (err < 0)
return err;
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1318S_PHY_WOL_PAGE);
+ err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
return err;
@@ -1413,8 +1483,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *w
if (err < 0)
return err;
} else {
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- MII_88E1318S_PHY_WOL_PAGE);
+ err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
if (err < 0)
return err;
@@ -1427,7 +1496,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *w
return err;
}
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ err = marvell_set_page(phydev, oldpage);
if (err < 0)
return err;
@@ -1459,13 +1528,11 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
{
struct marvell_hw_stat stat = marvell_hw_stats[i];
struct marvell_priv *priv = phydev->priv;
- int err, oldpage, val;
+ int oldpage, val;
u64 ret;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
- stat.page);
- if (err < 0)
+ oldpage = marvell_get_set_page(phydev, stat.page);
+ if (oldpage < 0)
return UINT64_MAX;
val = phy_read(phydev, stat.reg);
@@ -1477,7 +1544,7 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
ret = priv->stats[i];
}
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ marvell_set_page(phydev, oldpage);
return ret;
}
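
marvell_get_stat() shows the error convention for unsigned counters: when the page switch fails, it reports UINT64_MAX instead of a negative errno, since the return type cannot carry a sign. A user-space model of that sentinel, with the page/selector plumbing stubbed out:

#include <stdio.h>
#include <stdint.h>

static int select_page(int page) { return page >= 0 ? 0 : -1; }
static int read_counter(void)    { return 1234; }  /* stub HW read */

static uint64_t get_stat(int page)
{
	if (select_page(page) < 0)
		return UINT64_MAX;      /* sentinel: "stat unavailable" */

	return (uint64_t)read_counter();
}

int main(void)
{
	printf("%llu\n", (unsigned long long)get_stat(2));   /* 1234 */
	printf("%llu\n", (unsigned long long)get_stat(-1));  /* sentinel */
	return 0;
}
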
@@ -1494,6 +1561,7 @@ static void marvell_get_stats(struct phy_device *phydev,
#ifdef CONFIG_HWMON
static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
{
+ int oldpage;
int ret;
int val;
@@ -1501,9 +1569,11 @@ static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
/* Enable temperature sensor */
ret = phy_read(phydev, MII_88E1121_MISC_TEST);
@@ -1533,7 +1603,7 @@ static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
*temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
@@ -1610,15 +1680,18 @@ static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
{
+ int oldpage;
int ret;
*temp = 0;
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
if (ret < 0)
@@ -1627,7 +1700,7 @@ static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
*temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
@@ -1635,15 +1708,18 @@ error:
int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
{
+ int oldpage;
int ret;
*temp = 0;
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
ret = phy_read(phydev, MII_88E1121_MISC_TEST);
if (ret < 0)
@@ -1655,7 +1731,7 @@ int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
*temp *= 1000;
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
@@ -1663,13 +1739,16 @@ error:
int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
{
+ int oldpage;
int ret;
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
ret = phy_read(phydev, MII_88E1121_MISC_TEST);
if (ret < 0)
@@ -1682,7 +1761,7 @@ int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
(temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
@@ -1690,15 +1769,18 @@ error:
int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
{
+ int oldpage;
int ret;
*alarm = false;
mutex_lock(&phydev->lock);
- ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
- if (ret < 0)
- goto error;
+ oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+ if (oldpage < 0) {
+ mutex_unlock(&phydev->lock);
+ return oldpage;
+ }
ret = phy_read(phydev, MII_88E1121_MISC_TEST);
if (ret < 0)
@@ -1706,7 +1788,7 @@ int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
*alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
error:
- phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ marvell_set_page(phydev, oldpage);
mutex_unlock(&phydev->lock);
return ret;
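
All five hwmon accessors get the same treatment: the page switch goes through the get/set helper, a failed switch unlocks and returns early, and the previously selected page (not a hard-coded page 0) is restored on exit. The raw-to-millidegree conversion below mirrors m88e1510_get_temp() under the assumption, taken from the code, that raw units are degrees with a -25 offset; locking and register access are stubbed:

#include <stdio.h>

#define TEMP_MASK 0xff

static int read_temp_raw(void) { return 0x3c; } /* stub sensor read */

static long raw_to_millideg(int raw)
{
	return ((raw & TEMP_MASK) - 25) * 1000L;
}

int main(void)
{
	/* 0x3c = 60 raw -> (60 - 25) * 1000 = 35000 millidegrees */
	printf("%ld\n", raw_to_millideg(read_temp_raw()));
	return 0;
}
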
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 3e2ac07b6e37..bfd3090fb055 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -34,76 +34,73 @@
static bool xgene_mdio_status;
-static u32 xgene_enet_rd_mac(void __iomem *base_addr, u32 rd_addr)
+u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr)
{
void __iomem *addr, *rd, *cmd, *cmd_done;
u32 done, rd_data = BUSY_MASK;
u8 wait = 10;
- addr = base_addr + MAC_ADDR_REG_OFFSET;
- rd = base_addr + MAC_READ_REG_OFFSET;
- cmd = base_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = base_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ addr = pdata->mac_csr_addr + MAC_ADDR_REG_OFFSET;
+ rd = pdata->mac_csr_addr + MAC_READ_REG_OFFSET;
+ cmd = pdata->mac_csr_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mac_csr_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ spin_lock(&pdata->mac_lock);
iowrite32(rd_addr, addr);
iowrite32(XGENE_ENET_RD_CMD, cmd);
- while (wait--) {
- done = ioread32(cmd_done);
- if (done)
- break;
+ while (!(done = ioread32(cmd_done)) && wait--)
udelay(1);
- }
- if (!done)
- return rd_data;
+ if (done)
+ rd_data = ioread32(rd);
- rd_data = ioread32(rd);
iowrite32(0, cmd);
+ spin_unlock(&pdata->mac_lock);
return rd_data;
}
+EXPORT_SYMBOL(xgene_mdio_rd_mac);
-static void xgene_enet_wr_mac(void __iomem *base_addr, u32 wr_addr, u32 wr_data)
+void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data)
{
void __iomem *addr, *wr, *cmd, *cmd_done;
u8 wait = 10;
u32 done;
- addr = base_addr + MAC_ADDR_REG_OFFSET;
- wr = base_addr + MAC_WRITE_REG_OFFSET;
- cmd = base_addr + MAC_COMMAND_REG_OFFSET;
- cmd_done = base_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ addr = pdata->mac_csr_addr + MAC_ADDR_REG_OFFSET;
+ wr = pdata->mac_csr_addr + MAC_WRITE_REG_OFFSET;
+ cmd = pdata->mac_csr_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mac_csr_addr + MAC_COMMAND_DONE_REG_OFFSET;
+ spin_lock(&pdata->mac_lock);
iowrite32(wr_addr, addr);
- iowrite32(wr_data, wr);
+ iowrite32(data, wr);
iowrite32(XGENE_ENET_WR_CMD, cmd);
- while (wait--) {
- done = ioread32(cmd_done);
- if (done)
- break;
+ while (!(done = ioread32(cmd_done)) && wait--)
udelay(1);
- }
if (!done)
pr_err("MCX mac write failed, addr: 0x%04x\n", wr_addr);
iowrite32(0, cmd);
+ spin_unlock(&pdata->mac_lock);
}
+EXPORT_SYMBOL(xgene_mdio_wr_mac);
int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg)
{
- void __iomem *addr = (void __iomem *)bus->priv;
+ struct xgene_mdio_pdata *pdata = (struct xgene_mdio_pdata *)bus->priv;
u32 data, done;
u8 wait = 10;
data = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg);
- xgene_enet_wr_mac(addr, MII_MGMT_ADDRESS_ADDR, data);
- xgene_enet_wr_mac(addr, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_ADDRESS_ADDR, data);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
do {
usleep_range(5, 10);
- done = xgene_enet_rd_mac(addr, MII_MGMT_INDICATORS_ADDR);
+ done = xgene_mdio_rd_mac(pdata, MII_MGMT_INDICATORS_ADDR);
} while ((done & BUSY_MASK) && wait--);
if (done & BUSY_MASK) {
@@ -111,8 +108,8 @@ int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg)
return -EBUSY;
}
- data = xgene_enet_rd_mac(addr, MII_MGMT_STATUS_ADDR);
- xgene_enet_wr_mac(addr, MII_MGMT_COMMAND_ADDR, 0);
+ data = xgene_mdio_rd_mac(pdata, MII_MGMT_STATUS_ADDR);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);
return data;
}
@@ -120,17 +117,17 @@ EXPORT_SYMBOL(xgene_mdio_rgmii_read);
int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
{
- void __iomem *addr = (void __iomem *)bus->priv;
+ struct xgene_mdio_pdata *pdata = (struct xgene_mdio_pdata *)bus->priv;
u32 val, done;
u8 wait = 10;
val = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg);
- xgene_enet_wr_mac(addr, MII_MGMT_ADDRESS_ADDR, val);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_ADDRESS_ADDR, val);
- xgene_enet_wr_mac(addr, MII_MGMT_CONTROL_ADDR, data);
+ xgene_mdio_wr_mac(pdata, MII_MGMT_CONTROL_ADDR, data);
do {
usleep_range(5, 10);
- done = xgene_enet_rd_mac(addr, MII_MGMT_INDICATORS_ADDR);
+ done = xgene_mdio_rd_mac(pdata, MII_MGMT_INDICATORS_ADDR);
} while ((done & BUSY_MASK) && wait--);
if (done & BUSY_MASK) {
@@ -174,8 +171,8 @@ static int xgene_enet_ecc_init(struct xgene_mdio_pdata *pdata)
static void xgene_gmac_reset(struct xgene_mdio_pdata *pdata)
{
- xgene_enet_wr_mac(pdata->mac_csr_addr, MAC_CONFIG_1_ADDR, SOFT_RESET);
- xgene_enet_wr_mac(pdata->mac_csr_addr, MAC_CONFIG_1_ADDR, 0);
+ xgene_mdio_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET);
+ xgene_mdio_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
@@ -375,6 +372,9 @@ static int xgene_mdio_probe(struct platform_device *pdev)
pdata->mdio_csr_addr = csr_base + BLOCK_XG_MDIO_CSR_OFFSET;
pdata->diag_csr_addr = csr_base + BLOCK_DIAG_CSR_OFFSET;
+ if (mdio_id == XGENE_MDIO_RGMII)
+ spin_lock_init(&pdata->mac_lock);
+
if (dev->of_node) {
pdata->clk = devm_clk_get(dev, NULL);
if (IS_ERR(pdata->clk)) {
@@ -396,7 +396,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
if (mdio_id == XGENE_MDIO_RGMII) {
mdio_bus->read = xgene_mdio_rgmii_read;
mdio_bus->write = xgene_mdio_rgmii_write;
- mdio_bus->priv = (void __force *)pdata->mac_csr_addr;
+ mdio_bus->priv = (void __force *)pdata;
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s",
"xgene-mii-rgmii");
} else {
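
The mdio-xgene change converts the indirect MAC accessors to take the driver's pdata and serialize the address/command/read sequence under the new mac_lock, since multiple logical registers share one access window. A user-space analogue of that locked window access, with a plain flag standing in for spin_lock():

#include <stdio.h>

struct fake_pdata {
	int locked;          /* stand-in for spinlock_t mac_lock */
	unsigned int addr;   /* ADDR register of the window      */
	unsigned int data;   /* READ register of the window      */
};

static unsigned int rd_mac(struct fake_pdata *p, unsigned int rd_addr)
{
	unsigned int val;

	p->locked = 1;            /* spin_lock(&pdata->mac_lock); */
	p->addr = rd_addr;        /* program the window address   */
	val = p->data;            /* latch the result             */
	p->locked = 0;            /* spin_unlock(...);            */

	return val;
}

int main(void)
{
	struct fake_pdata p = { 0, 0, 0xbeef };

	printf("0x%x\n", rd_mac(&p, 0x10)); /* 0xbeef */
	return 0;
}
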
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index 594a11d42401..3c85f3e30baa 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -102,6 +102,7 @@ struct xgene_mdio_pdata {
void __iomem *mdio_csr_addr;
struct mii_bus *mdio_bus;
int mdio_id;
+ spinlock_t mac_lock; /* serializes indirect MAC CSR access */
};
/* Set the specified value into a bit-field defined by its starting position
@@ -132,6 +133,8 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
#define GET_BIT(field, src) \
xgene_enet_get_field_value(field ## _POS, 1, src)
+u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
+void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6a5fd18f062c..4cfd54182da2 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -779,7 +779,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
@@ -793,7 +793,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021 or KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -811,7 +811,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -829,7 +829,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
@@ -847,7 +847,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -865,7 +865,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -883,7 +883,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00fffffc,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -901,7 +901,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -919,7 +919,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -932,7 +932,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
@@ -952,7 +952,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
@@ -969,7 +969,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
- .flags = PHY_HAS_MAGICANEG,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
@@ -980,7 +979,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -991,7 +990,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8795",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 2b2f543cf9f0..37ee856c7680 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -146,7 +146,7 @@ static struct phy_driver microchip_phy_driver[] = {
.name = "Microchip LAN88xx",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = lan88xx_probe,
.remove = lan88xx_remove,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1219eeab69d1..f84414b8f2ee 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -960,6 +960,15 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->attached_dev = dev;
dev->phydev = phydev;
+ err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
+ "attached_dev");
+ if (err)
+ goto error;
+
+ err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj,
+ "phydev");
+ if (err)
+ goto error;
phydev->dev_flags = flags;
@@ -1050,6 +1059,8 @@ void phy_detach(struct phy_device *phydev)
struct mii_bus *bus;
int i;
+ sysfs_remove_link(&dev->dev.kobj, "phydev");
+ sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_suspend(phydev);
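
The phy_device.c hunks add two symmetric sysfs links (phydev -> netdev and back) that must both be created on attach and both removed on detach. A minimal user-space model of that acquire-pair contract; note this sketch rolls back the first link when the second fails, whereas the driver routes failures through its shared error label:

#include <stdio.h>

static int make_link(const char *name) { printf("link %s\n", name); return 0; }
static void drop_link(const char *name) { printf("unlink %s\n", name); }

static int attach(void)
{
	int err;

	err = make_link("attached_dev");    /* phydev -> netdev */
	if (err)
		return err;

	err = make_link("phydev");          /* netdev -> phydev */
	if (err) {
		drop_link("attached_dev");  /* roll back the first */
		return err;
	}
	return 0;
}

static void detach(void)
{
	/* removal mirrors creation, in reverse order */
	drop_link("phydev");
	drop_link("attached_dev");
}

int main(void)
{
	if (!attach())
		detach();
	return 0;
}
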
@@ -1571,13 +1582,13 @@ int genphy_config_init(struct phy_device *phydev)
return 0;
}
+EXPORT_SYMBOL(genphy_config_init);
static int gen10g_soft_reset(struct phy_device *phydev)
{
/* Do nothing for now */
return 0;
}
-EXPORT_SYMBOL(genphy_config_init);
static int gen10g_config_init(struct phy_device *phydev)
{
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index cef6967b0396..67c9f2b26c8e 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -170,7 +170,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN83C185",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -192,7 +192,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8187",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -214,7 +214,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8700",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -236,7 +236,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN911x Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -257,7 +257,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8710/LAN8720",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -279,7 +279,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8740",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+ .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 4d4173d25dd0..9af3239d6ad5 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -824,15 +824,17 @@ done:
static ssize_t tap_do_read(struct tap_queue *q,
struct iov_iter *to,
- int noblock)
+ int noblock, struct sk_buff *skb)
{
DEFINE_WAIT(wait);
- struct sk_buff *skb;
ssize_t ret = 0;
if (!iov_iter_count(to))
return 0;
+ if (skb)
+ goto put;
+
while (1) {
if (!noblock)
prepare_to_wait(sk_sleep(&q->sk), &wait,
@@ -856,6 +858,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
if (!noblock)
finish_wait(sk_sleep(&q->sk), &wait);
+put:
if (skb) {
ret = tap_put_user(q, skb, to);
if (unlikely(ret < 0))
@@ -872,7 +875,7 @@ static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct tap_queue *q = file->private_data;
ssize_t len = iov_iter_count(to), ret;
- ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK);
+ ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
@@ -1155,7 +1158,8 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
return -EINVAL;
- ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
+ ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT,
+ m->msg_control);
if (ret > total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -1193,6 +1197,19 @@ struct socket *tap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tap_get_socket);
+struct skb_array *tap_get_skb_array(struct file *file)
+{
+ struct tap_queue *q;
+
+ if (file->f_op != &tap_fops)
+ return ERR_PTR(-EINVAL);
+ q = file->private_data;
+ if (!q)
+ return ERR_PTR(-EBADFD);
+ return &q->skb_array;
+}
+EXPORT_SYMBOL_GPL(tap_get_skb_array);
+
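
tap_get_skb_array() (and its tun twin below) follows a common accessor idiom: verify the file really belongs to this driver before trusting private_data, and encode failures as ERR_PTR-style values. A self-contained imitation using plain ints for the errnos:

#include <stdio.h>

#define EINVAL 22
#define EBADFD 77

struct fake_file { int magic; void *private_data; };
#define TAP_MAGIC 0x7a9

static void *get_queue(struct fake_file *f, int *err)
{
	if (f->magic != TAP_MAGIC) {        /* file->f_op != &tap_fops */
		*err = -EINVAL;
		return NULL;
	}
	if (!f->private_data) {             /* no queue attached yet */
		*err = -EBADFD;
		return NULL;
	}
	*err = 0;
	return f->private_data;
}

int main(void)
{
	int q = 1, err;
	struct fake_file good = { TAP_MAGIC, &q }, bad = { 0, NULL };

	get_queue(&good, &err); printf("good: %d\n", err);  /* 0 */
	get_queue(&bad, &err);  printf("bad: %d\n", err);   /* -22 */
	return 0;
}
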
int tap_queue_resize(struct tap_dev *tap)
{
struct net_device *dev = tap->dev;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bbd707b9ef7a..f8041f9c7e65 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1510,9 +1510,8 @@ out:
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct iov_iter *to,
- int noblock)
+ int noblock, struct sk_buff *skb)
{
- struct sk_buff *skb;
ssize_t ret;
int err;
@@ -1521,10 +1520,12 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
if (!iov_iter_count(to))
return 0;
- /* Read frames from ring */
- skb = tun_ring_recv(tfile, noblock, &err);
- if (!skb)
- return err;
+ if (!skb) {
+ /* Read frames from ring */
+ skb = tun_ring_recv(tfile, noblock, &err);
+ if (!skb)
+ return err;
+ }
ret = tun_put_user(tun, tfile, skb, to);
if (unlikely(ret < 0))
@@ -1544,7 +1545,7 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (!tun)
return -EBADFD;
- ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK);
+ ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
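
Both tap_do_read() and tun_do_read() gain an optional pre-fetched skb parameter: if the caller (here, vhost via msg_control) already pulled a buffer from the ring, it is consumed directly; otherwise the function receives one itself. A tiny user-space analogue of that "use it if you have it" contract:

#include <stdio.h>
#include <stddef.h>

static int fetch_from_ring(void) { return 42; } /* stub receive path */

static int do_read(const int *prefetched)
{
	/* consume the caller's buffer if present, else fetch our own */
	int item = prefetched ? *prefetched : fetch_from_ring();

	return item;
}

int main(void)
{
	int cached = 7;

	printf("%d %d\n", do_read(NULL), do_read(&cached)); /* 42 7 */
	return 0;
}
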
@@ -1646,7 +1647,8 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
SOL_PACKET, TUN_TX_TIMESTAMP);
goto out;
}
- ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
+ ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
+ m->msg_control);
if (ret > (ssize_t)total_len) {
m->msg_flags |= MSG_TRUNC;
ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2626,6 +2628,19 @@ struct socket *tun_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(tun_get_socket);
+struct skb_array *tun_get_skb_array(struct file *file)
+{
+ struct tun_file *tfile;
+
+ if (file->f_op != &tun_fops)
+ return ERR_PTR(-EINVAL);
+ tfile = file->private_data;
+ if (!tfile)
+ return ERR_PTR(-EBADFD);
+ return &tfile->tx_array;
+}
+EXPORT_SYMBOL_GPL(tun_get_skb_array);
+
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 4cbdb1307f3e..3202c19df83d 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -264,17 +264,9 @@ static inline void nc_dump_status(struct usbnet *dev, u16 status)
* TTL register
*/
-#define TTL_THIS(ttl) (0x00ff & ttl)
#define TTL_OTHER(ttl) (0x00ff & (ttl >> 8))
#define MK_TTL(this,other) ((u16)(((other)<<8)|(0x00ff&(this))))
-static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
-{
- netif_dbg(dev, link, dev->net, "net1080 %s-%s ttl 0x%x this = %d, other = %d\n",
- dev->udev->bus->bus_name, dev->udev->devpath,
- ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
-}
-
/*-------------------------------------------------------------------------*/
static int net1080_reset(struct usbnet *dev)
@@ -308,7 +300,6 @@ static int net1080_reset(struct usbnet *dev)
goto done;
}
ttl = vp;
- // nc_dump_ttl(dev, ttl);
nc_register_write(dev, REG_TTL,
MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ddc62cb69be8..e902df9595b9 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -841,12 +841,6 @@ int pla_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
}
static inline
-int usb_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data)
-{
- return generic_ocp_read(tp, index, size, data, MCU_TYPE_USB);
-}
-
-static inline
int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
{
return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 6742ae605660..33df76405b86 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -36,7 +36,6 @@
#define DRV_NAME "ucc_hdlc"
#define TDM_PPPOHT_SLIC_MAXIN
-#define BROKEN_FRAME_INFO
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
@@ -99,6 +98,13 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
uf_info->tsa = 1;
uf_info->ctsp = 1;
}
+
+ /* This sets the HPM field in the CMXUCR register, which
+ * configures an open-drain connected HDLC bus
+ */
+ if (priv->hdlc_bus)
+ uf_info->brkpt_support = 1;
+
uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
UCC_HDLC_UCCE_TXB) << 16);
@@ -114,6 +120,9 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Loopback mode */
if (priv->loopback) {
dev_info(priv->dev, "Loopback Mode\n");
+ /* use the same clock when working in loopback */
+ qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
+
gumr = ioread32be(&priv->uf_regs->gumr);
gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
UCC_FAST_GUMR_TCI);
@@ -133,11 +142,33 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Set UPSMR normal mode (need fixed)*/
iowrite32be(0, &priv->uf_regs->upsmr);
+ /* hdlc_bus mode */
+ if (priv->hdlc_bus) {
+ u32 upsmr;
+
+ dev_info(priv->dev, "HDLC bus Mode\n");
+ upsmr = ioread32be(&priv->uf_regs->upsmr);
+
+ /* bus mode and retransmit enable, with collision window
+ * set to 8 bytes
+ */
+ upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
+ UCC_HDLC_UPSMR_CW8;
+ iowrite32be(upsmr, &priv->uf_regs->upsmr);
+
+ /* explicitly disable CDS & CTSP */
+ gumr = ioread32be(&priv->uf_regs->gumr);
+ gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
+ /* set automatic sync to explicitly ignore CD signal */
+ gumr |= UCC_FAST_GUMR_SYNL_AUTO;
+ iowrite32be(gumr, &priv->uf_regs->gumr);
+ }
+
priv->rx_ring_size = RX_BD_RING_LEN;
priv->tx_ring_size = TX_BD_RING_LEN;
/* Alloc Rx BD */
priv->rx_bd_base = dma_alloc_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
&priv->dma_rx_bd, GFP_KERNEL);
if (!priv->rx_bd_base) {
@@ -148,7 +179,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Alloc Tx BD */
priv->tx_bd_base = dma_alloc_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
&priv->dma_tx_bd, GFP_KERNEL);
if (!priv->tx_bd_base) {
@@ -158,7 +189,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
}
/* Alloc parameter ram for ucc hdlc */
- priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram),
+ priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
if (priv->ucc_pram_offset < 0) {
@@ -295,11 +326,11 @@ free_ucc_pram:
qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
ucc_fast_free(priv->uccf);
@@ -314,8 +345,6 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
struct qe_bd __iomem *bd;
u16 bd_status;
unsigned long flags;
- u8 *send_buf;
- int i;
u16 *proto_head;
switch (dev->type) {
@@ -352,16 +381,6 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return -ENOMEM;
}
-
- pr_info("Tx data skb->len:%d ", skb->len);
- send_buf = (u8 *)skb->data;
- pr_info("\nTransmitted data:\n");
- for (i = 0; i < 16; i++) {
- if (i == skb->len)
- pr_info("++++");
- else
- pr_info("%02x\n", send_buf[i]);
- }
spin_lock_irqsave(&priv->lock, flags);
/* Start from the next BD that should be filled */
@@ -423,7 +442,6 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
skb = priv->tx_skbuff[priv->skb_dirtytx];
if (!skb)
break;
- pr_info("TxBD: %x\n", bd_status);
dev->stats.tx_packets++;
memset(priv->tx_buffer +
(be32_to_cpu(bd->buf) - priv->dma_tx_addr),
@@ -454,14 +472,12 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
struct net_device *dev = priv->ndev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
u16 bd_status;
u16 length, howmany = 0;
u8 *bdbuffer;
- int i;
- static int entry;
bd = priv->currx_bd;
bd_status = ioread16be(&bd->status);
@@ -471,9 +487,6 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
if (bd_status & R_OV_S)
dev->stats.rx_over_errors++;
if (bd_status & R_CR_S) {
-#ifdef BROKEN_FRAME_INFO
- pr_info("Broken Frame with RxBD: %x\n", bd_status);
-#endif
dev->stats.rx_crc_errors++;
dev->stats.rx_dropped++;
goto recycle;
@@ -482,17 +495,6 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
length = ioread16be(&bd->length);
- pr_info("Received data length:%d", length);
- pr_info("while entry times:%d", entry++);
-
- pr_info("\nReceived data:\n");
- for (i = 0; (i < 16); i++) {
- if (i == length)
- pr_info("++++");
- else
- pr_info("%02x\n", bdbuffer[i]);
- }
-
switch (dev->type) {
case ARPHRD_RAWHDLC:
bdbuffer += HDLC_HEAD_LEN;
@@ -531,7 +533,6 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
howmany++;
if (hdlc->proto)
skb->protocol = hdlc_type_trans(skb, dev);
- pr_info("skb->protocol:%x\n", skb->protocol);
netif_receive_skb(skb);
recycle:
@@ -566,7 +567,7 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
/* Tx event processing */
spin_lock(&priv->lock);
- hdlc_tx_done(priv);
+ hdlc_tx_done(priv);
spin_unlock(&priv->lock);
howmany = 0;
@@ -597,7 +598,6 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
uccm = ioread32be(uccf->p_uccm);
ucce &= uccm;
iowrite32be(ucce, uccf->p_ucce);
- pr_info("irq ucce:%x\n", ucce);
if (!ucce)
return IRQ_NONE;
@@ -688,7 +688,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
if (priv->rx_bd_base) {
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
priv->rx_bd_base, priv->dma_rx_bd);
priv->rx_bd_base = NULL;
@@ -697,7 +697,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
if (priv->tx_bd_base) {
dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
priv->tx_bd_base, priv->dma_tx_bd);
priv->tx_bd_base = NULL;
@@ -855,7 +855,6 @@ static int uhdlc_suspend(struct device *dev)
/* save power */
ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
- dev_dbg(dev, "ucc hdlc suspend\n");
return 0;
}
@@ -1001,7 +1000,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct ucc_hdlc_private *uhdlc_priv = NULL;
struct ucc_tdm_info *ut_info;
- struct ucc_tdm *utdm;
+ struct ucc_tdm *utdm = NULL;
struct resource res;
struct net_device *dev;
hdlc_device *hdlc;
@@ -1054,10 +1053,6 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* use the same clock when work in loopback */
- if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
- qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
-
ret = of_address_to_resource(np, 0, &res);
if (ret)
return -EINVAL;
@@ -1080,6 +1075,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
uhdlc_priv->loopback = 1;
+ if (of_get_property(np, "fsl,hdlc-bus", NULL))
+ uhdlc_priv->hdlc_bus = 1;
+
if (uhdlc_priv->tsa == 1) {
utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
if (!utdm) {
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
index 881ecdeef076..c21134c1f180 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.h
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -78,6 +78,7 @@ struct ucc_hdlc_private {
u16 tsa;
bool hdlc_busy;
bool loopback;
+ bool hdlc_bus;
u8 *tx_buffer;
u8 *rx_buffer;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 2f11836078ab..8bd3ed905813 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -57,7 +57,8 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
- int result, old_qlen;
+ unsigned int old_qlen;
+ int result;
switch (ifr->ifr_settings.type) {
case IF_GET_PROTO:
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index a5c97342fd5d..b97405ed6cae 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2954,7 +2954,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
"WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
sprintf(host_buf, "host_%d", host->host_no);
- qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION);
+ qed_ops->common->set_name(qedf->cdev, host_buf);
/* Set xid max values */
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index d6978cbc56f0..7658138f2283 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -2099,14 +2099,16 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
/* Update header info */
SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
ISCSI_ATTR_SIMPLE);
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
- SET_FIELD(cmd_pdu_header.flags_attr,
- ISCSI_CMD_HDR_WRITE, 1);
- task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
- } else {
- SET_FIELD(cmd_pdu_header.flags_attr,
- ISCSI_CMD_HDR_READ, 1);
- task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ if (hdr->cdb[0] != TEST_UNIT_READY) {
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_WRITE, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+ } else {
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_READ, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ }
}
cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
@@ -2117,7 +2119,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
- cmd_pdu_header.opcode = hdr->opcode;
+ cmd_pdu_header.hdr_first_byte = hdr->opcode;
qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
/* Fill tx AHS and rx buffer */
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index fd354d4e03eb..7df32a68bd54 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -578,7 +578,8 @@ int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
(struct iscsi_common_hdr *)cmd_header,
tx_sgl_params, cmd_params,
dif_task_params);
- else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+ else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) ||
+ (task_params->rx_io_size == 0 && task_params->tx_io_size == 0))
return init_rw_iscsi_task(task_params,
ISCSI_TASK_TYPE_INITIATOR_READ,
conn_params,
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 3548d46f9b27..0c8ccffa4c38 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -1461,9 +1461,6 @@ static const struct {
{ ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
"out of sge error"
},
- { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
- "tcp seg ip options error"
- },
{ ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
"tcp ip fragment error"
},
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 92775a8b74b1..073b3051bb8f 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1843,7 +1843,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qedi->mac);
sprintf(host_buf, "host_%d", qedi->shost->host_no);
- qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+ qedi_ops->common->set_name(qedi->cdev, host_buf);
qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f61f852d6cfd..e3d7ea1288c6 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -28,6 +28,8 @@
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
+#include <linux/skb_array.h>
+#include <linux/skbuff.h>
#include <net/sock.h>
@@ -85,6 +87,13 @@ struct vhost_net_ubuf_ref {
struct vhost_virtqueue *vq;
};
+#define VHOST_RX_BATCH 64
+struct vhost_net_buf {
+ struct sk_buff **queue;
+ int tail;
+ int head;
+};
+
struct vhost_net_virtqueue {
struct vhost_virtqueue vq;
size_t vhost_hlen;
@@ -99,6 +108,8 @@ struct vhost_net_virtqueue {
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
+ struct skb_array *rx_array;
+ struct vhost_net_buf rxq;
};
struct vhost_net {
@@ -117,6 +128,71 @@ struct vhost_net {
static unsigned vhost_net_zcopy_mask __read_mostly;
+static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
+{
+ if (rxq->tail != rxq->head)
+ return rxq->queue[rxq->head];
+ else
+ return NULL;
+}
+
+static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
+{
+ return rxq->tail - rxq->head;
+}
+
+static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
+{
+ return rxq->tail == rxq->head;
+}
+
+static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
+{
+ void *ret = vhost_net_buf_get_ptr(rxq);
+ ++rxq->head;
+ return ret;
+}
+
+static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ rxq->head = 0;
+ rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
+ VHOST_RX_BATCH);
+ return rxq->tail;
+}
+
+static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
+ skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
+ vhost_net_buf_get_size(rxq));
+ rxq->head = rxq->tail = 0;
+ }
+}
+
+static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
+{
+ struct vhost_net_buf *rxq = &nvq->rxq;
+
+ if (!vhost_net_buf_is_empty(rxq))
+ goto out;
+
+ if (!vhost_net_buf_produce(nvq))
+ return 0;
+
+out:
+ return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+}
+
+static void vhost_net_buf_init(struct vhost_net_buf *rxq)
+{
+ rxq->head = rxq->tail = 0;
+}
+
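
Taken together, these helpers implement a single-consumer staging buffer: produce refills the queue by batch-consuming up to VHOST_RX_BATCH skbs from the underlying ring, consume hands them out one at a time, and unproduce pushes any unread entries back. A minimal userspace sketch of the same pattern, with a plain callback standing in for the skb_array ring and void * for sk_buff *:

#include <stddef.h>

#define BATCH 64

struct staging {
	void *queue[BATCH];
	int head, tail;
};

/* Refill by batch-consuming from a backing store; returns entries staged. */
static int staging_produce(struct staging *s, int (*fill)(void **dst, int n))
{
	s->head = 0;
	s->tail = fill(s->queue, BATCH);
	return s->tail;
}

/* Hand out one staged entry, or NULL when the batch is exhausted. */
static void *staging_consume(struct staging *s)
{
	return s->head != s->tail ? s->queue[s->head++] : NULL;
}
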
static void vhost_net_enable_zcopy(int vq)
{
vhost_net_zcopy_mask |= 0x1 << vq;
@@ -201,6 +277,7 @@ static void vhost_net_vq_reset(struct vhost_net *n)
n->vqs[i].ubufs = NULL;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
+ vhost_net_buf_init(&n->vqs[i].rxq);
}
}
@@ -503,15 +580,14 @@ out:
mutex_unlock(&vq->mutex);
}
-static int peek_head_len(struct sock *sk)
+static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
- struct socket *sock = sk->sk_socket;
struct sk_buff *head;
int len = 0;
unsigned long flags;
- if (sock->ops->peek_len)
- return sock->ops->peek_len(sock);
+ if (rvq->rx_array)
+ return vhost_net_buf_peek(rvq);
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
head = skb_peek(&sk->sk_receive_queue);
@@ -537,10 +613,11 @@ static int sk_has_rx_data(struct sock *sk)
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
+ struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
unsigned long uninitialized_var(endtime);
- int len = peek_head_len(sk);
+ int len = peek_head_len(rvq, sk);
if (!len && vq->busyloop_timeout) {
/* Both tx vq and rx socket were polled here */
@@ -561,7 +638,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
vhost_poll_queue(&vq->poll);
mutex_unlock(&vq->mutex);
- len = peek_head_len(sk);
+ len = peek_head_len(rvq, sk);
}
return len;
@@ -699,6 +776,8 @@ static void handle_rx(struct vhost_net *net)
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
goto out;
+ if (nvq->rx_array)
+ msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
@@ -815,6 +894,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_net *n;
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
+ struct sk_buff **queue;
int i;
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT);
@@ -826,6 +906,15 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return -ENOMEM;
}
+ queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!queue) {
+ kfree(vqs);
+ kvfree(n);
+ return -ENOMEM;
+ }
+ n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
+
dev = &n->dev;
vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
@@ -838,6 +927,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].done_idx = 0;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
+ vhost_net_buf_init(&n->vqs[i].rxq);
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
@@ -853,11 +943,14 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
struct socket *sock;
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
mutex_lock(&vq->mutex);
sock = vq->private_data;
vhost_net_disable_vq(n, vq);
vq->private_data = NULL;
+ vhost_net_buf_unproduce(nvq);
mutex_unlock(&vq->mutex);
return sock;
}
@@ -912,6 +1005,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
+ kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
kfree(n->dev.vqs);
kvfree(n);
return 0;
@@ -950,6 +1044,25 @@ err:
return ERR_PTR(r);
}
+static struct skb_array *get_tap_skb_array(int fd)
+{
+ struct skb_array *array;
+ struct file *file = fget(fd);
+
+ if (!file)
+ return NULL;
+ array = tun_get_skb_array(file);
+ if (!IS_ERR(array))
+ goto out;
+ array = tap_get_skb_array(file);
+ if (!IS_ERR(array))
+ goto out;
+ array = NULL;
+out:
+ fput(file);
+ return array;
+}
+
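
get_tap_skb_array() probes the same fd against both backends and relies on the ERR_PTR()/IS_ERR() convention to distinguish "wrong file type" from success. The pattern in isolation, with a toy error-pointer test rather than the kernel's (illustrative only):

#include <stdint.h>
#include <stddef.h>

static inline int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;	/* toy IS_ERR() */
}

/* Try each backend in turn; NULL means neither recognized the file. */
static void *probe_backends(void *(*try_tun)(void), void *(*try_tap)(void))
{
	void *res = try_tun();

	if (!is_err(res))
		return res;
	res = try_tap();
	return is_err(res) ? NULL : res;
}
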
static struct socket *get_tap_socket(int fd)
{
struct file *file = fget(fd);
@@ -1026,6 +1139,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vhost_net_disable_vq(n, vq);
vq->private_data = sock;
+ vhost_net_buf_unproduce(nvq);
+ if (index == VHOST_NET_VQ_RX)
+ nvq->rx_array = get_tap_skb_array(fd);
r = vhost_vq_init_access(vq);
if (r)
goto err_used;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0c16866a7aac..3cd18ac0697f 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -62,6 +62,7 @@ int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_enabled(const struct net_device *dev);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
@@ -78,6 +79,19 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
{
return false;
}
+static inline bool br_multicast_enabled(const struct net_device *dev)
+{
+ return false;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
+bool br_vlan_enabled(const struct net_device *dev);
+#else
+static inline bool br_vlan_enabled(const struct net_device *dev)
+{
+ return false;
+}
#endif
#endif
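
Both new declarations follow the usual config-stub idiom: when the subsystem is built in, the header exposes the real prototype; otherwise a static inline stub returns a safe default so callers need no #ifdef of their own. The idiom in isolation (CONFIG_FOO and foo_enabled() are hypothetical names):

#if IS_ENABLED(CONFIG_FOO)
bool foo_enabled(const struct net_device *dev);
#else
static inline bool foo_enabled(const struct net_device *dev)
{
	return false;	/* subsystem compiled out: safe default */
}
#endif
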
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 3482c3c2037d..4837157da0dc 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -3,6 +3,7 @@
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
+struct skb_array *tap_get_skb_array(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -12,6 +13,10 @@ static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+static inline struct skb_array *tap_get_skb_array(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_TAP */
#include <net/sock.h>
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index ed6da2e6df90..bf9bdf42d577 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -19,6 +19,7 @@
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
+struct skb_array *tun_get_skb_array(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -28,5 +29,9 @@ static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+static inline struct skb_array *tun_get_skb_array(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a940ec6a046c..b26a478930eb 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -300,6 +300,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+
+ MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
};
enum {
@@ -973,6 +975,7 @@ enum mlx5_cap_type {
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
+ MLX5_CAP_FPGA,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1094,6 +1097,9 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
+#define MLX5_CAP_FPGA(mdev, cap) \
+ MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 93273d9ea4d1..6ea2f5734e37 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -108,6 +108,8 @@ enum {
MLX5_REG_QTCT = 0x400a,
MLX5_REG_DCBX_PARAM = 0x4020,
MLX5_REG_DCBX_APP = 0x4021,
+ MLX5_REG_FPGA_CAP = 0x4022,
+ MLX5_REG_FPGA_CTRL = 0x4023,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -761,6 +763,9 @@ struct mlx5_core_dev {
atomic_t num_qps;
u32 issi;
struct mlx5e_resources mlx5e_res;
+#ifdef CONFIG_MLX5_FPGA
+ struct mlx5_fpga_device *fpga;
+#endif
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;
#endif
@@ -895,11 +900,6 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
-static inline void *mlx5_vzalloc(unsigned long size)
-{
- return kvzalloc(size, GFP_KERNEL);
-}
-
static inline u32 mlx5_base_mkey(const u32 key)
{
return key & 0xffffff00u;
@@ -925,6 +925,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 32de0724b400..6fa1eb6766af 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -32,6 +32,8 @@
#ifndef MLX5_IFC_H
#define MLX5_IFC_H
+#include "mlx5_ifc_fpga.h"
+
enum {
MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0,
MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1,
@@ -56,7 +58,8 @@ enum {
MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b,
MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
- MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb
+ MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb,
+ MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20,
};
enum {
@@ -854,7 +857,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_tc[0x4];
u8 reserved_at_1d0[0x1];
u8 dcbx[0x1];
- u8 reserved_at_1d2[0x4];
+ u8 reserved_at_1d2[0x3];
+ u8 fpga[0x1];
u8 rol_s[0x1];
u8 rol_g[0x1];
u8 reserved_at_1d8[0x1];
@@ -2186,6 +2190,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
+ struct mlx5_ifc_fpga_cap_bits fpga_cap;
u8 reserved_at_0[0x8000];
};
@@ -8182,6 +8187,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_sltp_reg_bits sltp_reg;
struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
+ struct mlx5_ifc_fpga_ctrl_bits fpga_ctrl_bits;
+ struct mlx5_ifc_fpga_cap_bits fpga_cap_bits;
u8 reserved_at_0[0x60e0];
};
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
new file mode 100644
index 000000000000..0032d10ac6cf
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef MLX5_IFC_FPGA_H
+#define MLX5_IFC_FPGA_H
+
+struct mlx5_ifc_fpga_shell_caps_bits {
+ u8 max_num_qps[0x10];
+ u8 reserved_at_10[0x8];
+ u8 total_rcv_credits[0x8];
+
+ u8 reserved_at_20[0xe];
+ u8 qp_type[0x2];
+ u8 reserved_at_30[0x5];
+ u8 rae[0x1];
+ u8 rwe[0x1];
+ u8 rre[0x1];
+ u8 reserved_at_38[0x4];
+ u8 dc[0x1];
+ u8 ud[0x1];
+ u8 uc[0x1];
+ u8 rc[0x1];
+
+ u8 reserved_at_40[0x1a];
+ u8 log_ddr_size[0x6];
+
+ u8 max_fpga_qp_msg_size[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_fpga_cap_bits {
+ u8 fpga_id[0x8];
+ u8 fpga_device[0x18];
+
+ u8 register_file_ver[0x20];
+
+ u8 fpga_ctrl_modify[0x1];
+ u8 reserved_at_41[0x5];
+ u8 access_reg_query_mode[0x2];
+ u8 reserved_at_48[0x6];
+ u8 access_reg_modify_mode[0x2];
+ u8 reserved_at_50[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 image_version[0x20];
+
+ u8 image_date[0x20];
+
+ u8 image_time[0x20];
+
+ u8 shell_version[0x20];
+
+ u8 reserved_at_100[0x80];
+
+ struct mlx5_ifc_fpga_shell_caps_bits shell_caps;
+
+ u8 reserved_at_380[0x8];
+ u8 ieee_vendor_id[0x18];
+
+ u8 sandbox_product_version[0x10];
+ u8 sandbox_product_id[0x10];
+
+ u8 sandbox_basic_caps[0x20];
+
+ u8 reserved_at_3e0[0x10];
+ u8 sandbox_extended_caps_len[0x10];
+
+ u8 sandbox_extended_caps_addr[0x40];
+
+ u8 fpga_ddr_start_addr[0x40];
+
+ u8 fpga_cr_space_start_addr[0x40];
+
+ u8 fpga_ddr_size[0x20];
+
+ u8 fpga_cr_space_size[0x20];
+
+ u8 reserved_at_500[0x300];
+};
+
+struct mlx5_ifc_fpga_ctrl_bits {
+ u8 reserved_at_0[0x8];
+ u8 operation[0x8];
+ u8 reserved_at_10[0x8];
+ u8 status[0x8];
+
+ u8 reserved_at_20[0x8];
+ u8 flash_select_admin[0x8];
+ u8 reserved_at_30[0x8];
+ u8 flash_select_oper[0x8];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_CORRUPTED_DDR = 0x1,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_FLASH_TIMEOUT = 0x2,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_INTERNAL_LINK_ERROR = 0x3,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_WATCHDOG_FAILURE = 0x4,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_I2C_FAILURE = 0x5,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_IMAGE_CHANGED = 0x6,
+ MLX5_FPGA_ERROR_EVENT_SYNDROME_TEMPERATURE_CRITICAL = 0x7,
+};
+
+struct mlx5_ifc_fpga_error_event_bits {
+ u8 reserved_at_0[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 syndrome[0x8];
+
+ u8 reserved_at_60[0x80];
+};
+
+#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3f39d27decf4..c50c9218e31e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1824,7 +1824,7 @@ struct net_device {
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
- unsigned long tx_queue_len;
+ unsigned int tx_queue_len;
spinlock_t tx_global_lock;
int watchdog_timeo;
@@ -2456,6 +2456,7 @@ static inline int dev_recursion_level(void)
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
@@ -2573,9 +2574,7 @@ static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
__ret = __skb_gro_checksum_validate_complete(skb, \
compute_pseudo(skb, proto)); \
- if (__ret) \
- __skb_mark_checksum_bad(skb); \
- else \
+ if (!__ret) \
skb_gro_incr_csum_unnecessary(skb); \
__ret; \
})
@@ -3931,6 +3930,10 @@ void netdev_rss_key_fill(void *buffer, size_t len);
int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
+int skb_crc32c_csum_help(struct sk_buff *skb);
+int skb_csum_hwoffload_help(struct sk_buff *skb,
+ const netdev_features_t features);
+
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 5fff5ba5964e..a68aad484c69 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -97,6 +97,11 @@ struct netlink_ext_ack {
#define NL_SET_ERR_MSG_MOD(extack, msg) \
NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
+#define NL_SET_BAD_ATTR(extack, attr) do { \
+ if ((extack)) \
+ (extack)->bad_attr = (attr); \
+} while (0)
+
extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e76e4adbc7c7..5a808a26e4cf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -58,8 +58,7 @@
#define PHY_IGNORE_INTERRUPT -2
#define PHY_HAS_INTERRUPT 0x00000001
-#define PHY_HAS_MAGICANEG 0x00000002
-#define PHY_IS_INTERNAL 0x00000004
+#define PHY_IS_INTERNAL 0x00000002
#define MDIO_DEVICE_IS_PHY 0x80000000
/* Interface Mode definitions */
@@ -717,14 +716,24 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_interface_mode_is_rgmii - Convenience function for testing if a
+ * PHY interface mode is RGMII (all variants)
+ * @mode: the phy_interface_t enum
+ */
+static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
+{
+ return mode >= PHY_INTERFACE_MODE_RGMII &&
+ mode <= PHY_INTERFACE_MODE_RGMII_TXID;
+};
+
+/**
* phy_interface_is_rgmii - Convenience function for testing if a PHY interface
* is RGMII (all variants)
* @phydev: the phy_device struct
*/
static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
{
- return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
- phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+ return phy_interface_mode_is_rgmii(phydev->interface);
};
/*
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6b2e0dd88569..d8c97ec8a8e6 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -278,6 +278,22 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
return ptr;
}
+static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
+ void **array, int n)
+{
+ void *ptr;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ ptr = __ptr_ring_consume(r);
+ if (!ptr)
+ break;
+ array[i] = ptr;
+ }
+
+ return i;
+}
+
/*
* Note: resize (below) nests producer lock within consumer lock, so if you
* call this in interrupt or BH context, you must disable interrupts/BH when
@@ -328,6 +344,55 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
return ptr;
}
+static inline int ptr_ring_consume_batched(struct ptr_ring *r,
+ void **array, int n)
+{
+ int ret;
+
+ spin_lock(&r->consumer_lock);
+ ret = __ptr_ring_consume_batched(r, array, n);
+ spin_unlock(&r->consumer_lock);
+
+ return ret;
+}
+
+static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r,
+ void **array, int n)
+{
+ int ret;
+
+ spin_lock_irq(&r->consumer_lock);
+ ret = __ptr_ring_consume_batched(r, array, n);
+ spin_unlock_irq(&r->consumer_lock);
+
+ return ret;
+}
+
+static inline int ptr_ring_consume_batched_any(struct ptr_ring *r,
+ void **array, int n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&r->consumer_lock, flags);
+ ret = __ptr_ring_consume_batched(r, array, n);
+ spin_unlock_irqrestore(&r->consumer_lock, flags);
+
+ return ret;
+}
+
+static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
+ void **array, int n)
+{
+ int ret;
+
+ spin_lock_bh(&r->consumer_lock);
+ ret = __ptr_ring_consume_batched(r, array, n);
+ spin_unlock_bh(&r->consumer_lock);
+
+ return ret;
+}
+
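
The four wrappers pair the lock-free __ptr_ring_consume_batched() with the locking discipline of the caller's context, mirroring the existing single-element consumers: plain spin_lock for process context, _irq when interrupts must be excluded, _any (irqsave) when the context is unknown, and _bh for softirq exclusion. The core loop, restated over a generic single-consumer ring (illustrative, not the kernel API):

/* Pull up to n entries, stopping early when the ring runs dry; the
 * return value tells the caller how many slots of array[] are valid. */
static int consume_batched(void *(*consume_one)(void *ring), void *ring,
			   void **array, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		void *ptr = consume_one(ring);

		if (!ptr)
			break;
		array[i] = ptr;
	}
	return i;
}
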
/* Cast to structure type and call a function without discarding from FIFO.
* Function must return a value.
* Callers must take consumer_lock.
@@ -403,6 +468,61 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
return 0;
}
+/*
+ * Return entries into the ring. Destroy entries that don't fit.
+ *
+ * Note: this is expected to be a rare slow path operation.
+ *
+ * Note: producer lock is nested within consumer lock, so if you
+ * resize you must make sure all uses nest correctly.
+ * In particular, if you consume the ring in interrupt or BH context, you
+ * must disable interrupts/BH when doing so.
+ */
+static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
+ void (*destroy)(void *))
+{
+ unsigned long flags;
+ int head;
+
+ spin_lock_irqsave(&r->consumer_lock, flags);
+ spin_lock(&r->producer_lock);
+
+ if (!r->size)
+ goto done;
+
+ /*
+ * Clean out buffered entries (for simplicity). This way the following
+ * code can test entries for NULL and, if non-NULL, assume they are valid.
+ */
+ head = r->consumer_head - 1;
+ while (likely(head >= r->consumer_tail))
+ r->queue[head--] = NULL;
+ r->consumer_tail = r->consumer_head;
+
+ /*
+ * Go over entries in batch, start moving head back and copy entries.
+ * Stop when we run into previously unconsumed entries.
+ */
+ while (n) {
+ head = r->consumer_head - 1;
+ if (head < 0)
+ head = r->size - 1;
+ if (r->queue[head]) {
+ /* This batch entry will have to be destroyed. */
+ goto done;
+ }
+ r->queue[head] = batch[--n];
+ r->consumer_tail = r->consumer_head = head;
+ }
+
+done:
+ /* Destroy all entries left in the batch. */
+ while (n)
+ destroy(batch[--n]);
+ spin_unlock(&r->producer_lock);
+ spin_unlock_irqrestore(&r->consumer_lock, flags);
+}
+
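
A small runnable walk-through of the restore loop above: starting from consumer_head, it steps backwards (wrapping at slot 0), re-inserting batch entries newest-first until it meets a slot that is still occupied; whatever does not fit is destroyed. On a 4-slot ring that has been fully drained, returning a 2-entry batch lands the entries in the last two slots, preserving consume order:

#include <stdio.h>

#define SIZE 4

int main(void)
{
	const char *queue[SIZE] = { 0 };	/* drained ring */
	const char *batch[] = { "A", "B" };	/* consumed, being returned */
	int n = 2, head = 0;			/* consumer_head after draining */

	while (n) {
		int slot = (head - 1 + SIZE) % SIZE;

		if (queue[slot])
			break;			/* hit live entries: stop */
		queue[slot] = batch[--n];
		head = slot;
	}
	for (int i = 0; i < SIZE; i++)
		printf("slot %d: %s\n", i, queue[i] ? queue[i] : "-");
	/* any leftover batch entries (n > 0) would be destroyed here */
	return 0;
}
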
static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
int size, gfp_t gfp,
void (*destroy)(void *))
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index fbab6e0514f0..a567cbf8c5b4 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -96,12 +96,12 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define MAX_NUM_LL2_RX_QUEUES 32
-#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
+#define MAX_NUM_LL2_RX_QUEUES 48
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 15
-#define FW_REVISION_VERSION 3
+#define FW_MINOR_VERSION 20
+#define FW_REVISION_VERSION 0
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -181,6 +181,14 @@
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+
+#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
+#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+
/*****************/
/* DQ CONSTANTS */
/*****************/
@@ -457,7 +465,6 @@
#define PXP_BAR_DQ 1
/* PTT and GTT */
-#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
@@ -482,6 +489,7 @@
#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+#define PXP_NUM_PF_WINDOWS 12
#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
@@ -618,16 +626,21 @@
/*****************/
/* PRM CONSTANTS */
/*****************/
-#define PRM_DMA_PAD_BYTES_NUM 2
-/******************/
-/* SDMs CONSTANTS */
-/******************/
-#define SDM_OP_GEN_TRIG_NONE 0
-#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
-#define SDM_OP_GEN_TRIG_AGG_INT 2
-#define SDM_OP_GEN_TRIG_LOADER 4
-#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
-#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
+#define PRM_DMA_PAD_BYTES_NUM 2
+/*****************/
+/* SDMs CONSTANTS */
+/*****************/
+
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
+
+/********************/
+/* Completion types */
+/********************/
#define SDM_COMP_TYPE_NONE 0
#define SDM_COMP_TYPE_WAKE_THREAD 1
@@ -638,10 +651,11 @@
#define SDM_COMP_TYPE_INDICATE_ERROR 6
#define SDM_COMP_TYPE_RELEASE_THREAD 7
#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9
-/******************/
-/* PBF CONSTANTS */
-/******************/
+/*****************/
+/* PBF Constants */
+/*****************/
/* Number of PBF command queue lines. Each line is 32B. */
#define PBF_MAX_CMD_LINES 3328
@@ -861,7 +875,7 @@ enum db_dest {
/* Enum of doorbell DPM types */
enum db_dpm_type {
DPM_LEGACY,
- DPM_ROCE,
+ DPM_RDMA,
DPM_L2_INLINE,
DPM_L2_BD,
MAX_DB_DPM_TYPE
@@ -884,8 +898,8 @@ struct db_l2_dpm_data {
#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
-#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
+#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
};
/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
@@ -931,31 +945,33 @@ struct db_pwm_addr {
};
/* Parameters to RoCE firmware, passed in EDPM doorbell */
-struct db_roce_dpm_params {
+struct db_rdma_dpm_params {
__le32 params;
-#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
-#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
-struct db_roce_dpm_data {
+struct db_rdma_dpm_data {
__le16 icid;
__le16 prod_val;
- struct db_roce_dpm_params params;
+ struct db_rdma_dpm_params params;
};
/* Igu interrupt command */
@@ -1026,6 +1042,42 @@ struct parsing_and_err_flags {
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
+struct parsing_err_flags {
+ __le16 flags;
+#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
+};
+
struct pb_context {
__le32 crc[4];
};
@@ -1288,39 +1340,56 @@ struct tdif_task_context {
struct timers_context {
__le32 logical_client_0;
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
-#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
-#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
-#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
-#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 27
+#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
__le32 logical_client_1;
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
-#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
-#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
-#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
-#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 27
+#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 30
__le32 logical_client_2;
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
-#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
-#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
-#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
-#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED2_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED2_SHIFT 30
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED4_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED4_SHIFT 27
+#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
+#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED5_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED5_SHIFT 30
__le32 host_expiration_fields;
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
-#define TIMERS_CONTEXT_RESERVED3_MASK 0x7
-#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED6_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED6_SHIFT 27
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
+#define TIMERS_CONTEXT_RESERVED7_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
+
+enum tunnel_next_protocol {
+ e_unknown = 0,
+ e_l2 = 1,
+ e_ipv4 = 2,
+ e_ipv6 = 3,
+ MAX_TUNNEL_NEXT_PROTOCOL
+};
+
#endif /* __COMMON_HSI__ */
#endif
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 34d93eb5bfba..cb06e6e368e1 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -75,7 +75,8 @@
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
-#define ETH_RX_MAX_BUFF_PER_PKT 5
+#define ETH_RX_MAX_BUFF_PER_PKT 5
+#define ETH_RX_BD_THRESHOLD 12
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 947a635d04bb..12fc9e788eea 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -13,7 +13,6 @@
/*********************/
#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
-#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600)
struct fcoe_abts_pkt {
__le32 abts_rsp_fc_payload_lo;
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 69949f8e354b..85e086cba639 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -75,25 +75,13 @@
#define ISCSI_TARGET_MODE 1
/* iSCSI request op codes */
-#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0)
-#define ISCSI_OPCODE_NOP_OUT ( \
- ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40)
-#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1)
-#define ISCSI_OPCODE_SCSI_CMD ( \
- ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40)
-#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2)
-#define ISCSI_OPCODE_TMF_REQUEST ( \
- ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40)
-#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3)
-#define ISCSI_OPCODE_LOGIN_REQUEST ( \
- ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40)
-#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4)
-#define ISCSI_OPCODE_TEXT_REQUEST ( \
- ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40)
-#define ISCSI_OPCODE_DATA_OUT (5)
-#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6)
-#define ISCSI_OPCODE_LOGOUT_REQUEST ( \
- ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40)
+#define ISCSI_OPCODE_NOP_OUT (0)
+#define ISCSI_OPCODE_SCSI_CMD (1)
+#define ISCSI_OPCODE_TMF_REQUEST (2)
+#define ISCSI_OPCODE_LOGIN_REQUEST (3)
+#define ISCSI_OPCODE_TEXT_REQUEST (4)
+#define ISCSI_OPCODE_DATA_OUT (5)
+#define ISCSI_OPCODE_LOGOUT_REQUEST (6)
/* iSCSI response/messages op codes */
#define ISCSI_OPCODE_NOP_IN (0x20)
@@ -172,17 +160,23 @@ struct iscsi_async_msg_hdr {
struct iscsi_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_CMD_HDR_ATTR_MASK 0x7
-#define ISCSI_CMD_HDR_ATTR_SHIFT 0
-#define ISCSI_CMD_HDR_RSRV_MASK 0x3
-#define ISCSI_CMD_HDR_RSRV_SHIFT 3
-#define ISCSI_CMD_HDR_WRITE_MASK 0x1
-#define ISCSI_CMD_HDR_WRITE_SHIFT 5
-#define ISCSI_CMD_HDR_READ_MASK 0x1
-#define ISCSI_CMD_HDR_READ_SHIFT 6
-#define ISCSI_CMD_HDR_FINAL_MASK 0x1
-#define ISCSI_CMD_HDR_FINAL_SHIFT 7
- u8 opcode;
+#define ISCSI_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_CMD_HDR_READ_MASK 0x1
+#define ISCSI_CMD_HDR_READ_SHIFT 6
+#define ISCSI_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT 7
+ u8 hdr_first_byte;
+#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
+#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
+#define ISCSI_CMD_HDR_IMM_MASK 0x1
+#define ISCSI_CMD_HDR_IMM_SHIFT 6
+#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
+#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
__le32 hdr_second_dword;
#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
@@ -790,9 +784,9 @@ enum iscsi_error_types {
ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
ISCSI_CONN_ERROR_DATA_OVERRUN,
ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
- ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR,
- ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
- ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION,
+ ISCSI_CONN_ERROR_IP_OPTIONS_ERROR,
+ ISCSI_CONN_ERROR_PRS_ERRORS,
+ ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION,
ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE,
@@ -1304,22 +1298,6 @@ struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_total_pdu_cnt;
};
-struct iscsi_db_data {
- u8 params;
-#define ISCSI_DB_DATA_DEST_MASK 0x3
-#define ISCSI_DB_DATA_DEST_SHIFT 0
-#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
-#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
-#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
-#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
-#define ISCSI_DB_DATA_RESERVED_MASK 0x1
-#define ISCSI_DB_DATA_RESERVED_SHIFT 5
-#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
- u8 agg_flags;
- __le16 sq_prod;
-};
-
struct tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
@@ -1398,5 +1376,20 @@ struct tstorm_iscsi_task_ag_ctx {
__le32 reg1;
__le32 reg2;
};
+struct iscsi_db_data {
+ u8 params;
+#define ISCSI_DB_DATA_DEST_MASK 0x3
+#define ISCSI_DB_DATA_DEST_SHIFT 0
+#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
+#define ISCSI_DB_DATA_RESERVED_MASK 0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT 5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 sq_prod;
+};
#endif /* __ISCSI_COMMON__ */
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index c70ac13a97e6..73c46d6d5727 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -328,6 +328,14 @@ struct qed_dev_info {
/* MFW version */
u32 mfw_rev;
+#define QED_MFW_VERSION_0_MASK 0x000000FF
+#define QED_MFW_VERSION_0_OFFSET 0
+#define QED_MFW_VERSION_1_MASK 0x0000FF00
+#define QED_MFW_VERSION_1_OFFSET 8
+#define QED_MFW_VERSION_2_MASK 0x00FF0000
+#define QED_MFW_VERSION_2_OFFSET 16
+#define QED_MFW_VERSION_3_MASK 0xFF000000
+#define QED_MFW_VERSION_3_OFFSET 24
u32 flash_size;
u8 mf_mode;
@@ -337,6 +345,15 @@ struct qed_dev_info {
bool wol_support;
+ /* MBI version */
+ u32 mbi_version;
+#define QED_MBI_VERSION_0_MASK 0x000000FF
+#define QED_MBI_VERSION_0_OFFSET 0
+#define QED_MBI_VERSION_1_MASK 0x0000FF00
+#define QED_MBI_VERSION_1_OFFSET 8
+#define QED_MBI_VERSION_2_MASK 0x00FF0000
+#define QED_MBI_VERSION_2_OFFSET 16
+
enum qed_dev_type dev_type;
/* Output parameters for qede */
@@ -503,9 +520,7 @@ struct qed_common_ops {
int (*set_power_state)(struct qed_dev *cdev,
pci_power_t state);
- void (*set_id)(struct qed_dev *cdev,
- char name[],
- char ver_str[]);
+ void (*set_name)(struct qed_dev *cdev, char name[]);
/* Client drivers need to make this call before slowpath_start.
* PF params required for the call before slowpath_start is
@@ -700,11 +715,13 @@ struct qed_common_ops {
(((value) >> (name ## _SHIFT)) & name ## _MASK)
/* Debug print definitions */
-#define DP_ERR(cdev, fmt, ...) \
- pr_err("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- DP_NAME(cdev) ? DP_NAME(cdev) : "", \
- ## __VA_ARGS__) \
+#define DP_ERR(cdev, fmt, ...) \
+ do { \
+ pr_err("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+ ## __VA_ARGS__); \
+ } while (0)
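
The do { } while (0) wrapper turns the macro body into a single statement, matching DP_NOTICE below; the old expression form, with its trailing backslash, misbehaves when the macro is used as the body of an if/else. A minimal illustration of why the wrapper is the safe shape:

#define LOG_STMT(x)	do { report(x); } while (0)

void report(int x);

void f(int err)
{
	if (err)
		LOG_STMT(err);	/* expands to one statement... */
	else
		report(0);	/* ...so this else still binds correctly */
}
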
#define DP_NOTICE(cdev, fmt, ...) \
do { \
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 72c770f9f666..a9b3050f469c 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -42,7 +42,7 @@
#define RDMA_MAX_SGE_PER_SQ_WQE (4)
#define RDMA_MAX_SGE_PER_RQ_WQE (4)
-#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF)
+#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000)
#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index 866f063026de..fe6a33e45977 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -37,6 +37,8 @@
#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
#define ROCE_MAX_QPS (32 * 1024)
+#define ROCE_DCQCN_NP_MAX_QPS (64)
+#define ROCE_DCQCN_RP_MAX_QPS (64)
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index a5e843268f0e..dbf7a43c3e1f 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -111,7 +111,6 @@ struct tcp_offload_params {
__le32 snd_wnd;
__le32 rcv_wnd;
__le32 snd_wl1;
- __le32 ts_time;
__le32 ts_recent;
__le32 ts_recent_age;
__le32 total_rt;
@@ -122,7 +121,7 @@ struct tcp_offload_params {
u8 ka_probe_cnt;
u8 rt_cnt;
__le16 rtt_var;
- __le16 reserved2;
+ __le16 fw_internal;
__le32 ka_timeout;
__le32 ka_interval;
__le32 max_rt_time;
@@ -130,7 +129,7 @@ struct tcp_offload_params {
u8 snd_wnd_scale;
u8 ack_frequency;
__le16 da_timeout_value;
- __le32 ts_ticks_per_second;
+ __le32 reserved3[2];
};
struct tcp_offload_params_opt2 {
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index f4dfade428f0..35226cd4efb0 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -97,21 +97,46 @@ static inline struct sk_buff *skb_array_consume(struct skb_array *a)
return ptr_ring_consume(&a->ring);
}
+static inline int skb_array_consume_batched(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
return ptr_ring_consume_irq(&a->ring);
}
+static inline int skb_array_consume_batched_irq(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
return ptr_ring_consume_any(&a->ring);
}
+static inline int skb_array_consume_batched_any(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
+}
+
+
static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
return ptr_ring_consume_bh(&a->ring);
}
+static inline int skb_array_consume_batched_bh(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
+}
+
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
if (likely(skb)) {
@@ -156,6 +181,12 @@ static void __skb_array_destroy_skb(void *ptr)
kfree_skb(ptr);
}
+static inline void skb_array_unconsume(struct skb_array *a,
+ struct sk_buff **skbs, int n)
+{
+ ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
+}
+
static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a098d95b3d84..45a59c1e0cc7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -109,6 +109,7 @@
* may perform further validation in this case.
* GRE: only if the checksum is present in the header.
* SCTP: indicates the CRC in SCTP header has been validated.
+ * FCOE: indicates the CRC in FC frame has been validated.
*
* skb->csum_level indicates the number of consecutive checksums found in
* the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
@@ -126,8 +127,10 @@
* packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
* hardware doesn't need to parse L3/L4 headers to implement this.
*
- * Note: Even if device supports only some protocols, but is able to produce
- * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
+ * Notes:
+ * - Even if device supports only some protocols, but is able to produce
+ * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
+ * - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
*
* CHECKSUM_PARTIAL:
*
@@ -162,14 +165,11 @@
*
* NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
* NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
- * checksum offload capability. If a device has limited checksum capabilities
- * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
- * described above) a helper function can be called to resolve
- * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
- * function takes a spec argument that describes the protocol layer that is
- * supported for checksum offload and can be called for each packet. If a
- * packet does not match the specification for offload, skb_checksum_help
- * is called to resolve the checksum.
+ * checksum offload capability.
+ * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
+ * on network device checksumming capabilities: if a packet does not match
+ * them, skb_checksum_help or skb_crc32c_csum_help (depending on the value of
+ * csum_not_inet, see item D.) is called to resolve the checksum.
*
* CHECKSUM_NONE:
*
@@ -189,11 +189,13 @@
*
* NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
* offloading the SCTP CRC in a packet. To perform this offload the stack
- * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- * accordingly. Note the there is no indication in the skbuff that the
- * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports
- * both IP checksum offload and SCTP CRC offload must verify which offload
- * is configured for a packet presumably by inspecting packet headers.
+ * will set csum_start and csum_offset accordingly, set ip_summed to
+ * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
+ * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
+ * A driver that supports both IP checksum offload and SCTP CRC32c offload
+ * must verify which offload is configured for a packet by testing the
+ * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
+ * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
*
* NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
* offloading the FCOE CRC in a packet. To perform this offload the stack
@@ -506,66 +508,6 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
-/**
- * struct skb_mstamp - multi resolution time stamps
- * @stamp_us: timestamp in us resolution
- * @stamp_jiffies: timestamp in jiffies
- */
-struct skb_mstamp {
- union {
- u64 v64;
- struct {
- u32 stamp_us;
- u32 stamp_jiffies;
- };
- };
-};
-
-/**
- * skb_mstamp_get - get current timestamp
- * @cl: place to store timestamps
- */
-static inline void skb_mstamp_get(struct skb_mstamp *cl)
-{
- u64 val = local_clock();
-
- do_div(val, NSEC_PER_USEC);
- cl->stamp_us = (u32)val;
- cl->stamp_jiffies = (u32)jiffies;
-}
-
-/**
- * skb_mstamp_delta - compute the difference in usec between two skb_mstamp
- * @t1: pointer to newest sample
- * @t0: pointer to oldest sample
- */
-static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
- const struct skb_mstamp *t0)
-{
- s32 delta_us = t1->stamp_us - t0->stamp_us;
- u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
-
- /* If delta_us is negative, this might be because interval is too big,
- * or local_clock() drift is too big : fallback using jiffies.
- */
- if (delta_us <= 0 ||
- delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
-
- delta_us = jiffies_to_usecs(delta_jiffies);
-
- return delta_us;
-}
-
-static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
- const struct skb_mstamp *t0)
-{
- s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
-
- if (!diff)
- diff = t1->stamp_us - t0->stamp_us;
- return diff > 0;
-}
-
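
This series replaces the two-field skb_mstamp with a single u64 microsecond stamp (see the sk_buff and tcp.h hunks below), so deltas and ordering reduce to plain integer arithmetic. A hedged sketch of the equivalent operations on u64 stamps; the names are illustrative, not the kernel's:

#include <stdint.h>

static inline uint32_t stamp_us_delta(uint64_t t1, uint64_t t0)
{
	return t1 > t0 ? (uint32_t)(t1 - t0) : 0;	/* clamp clock skew */
}

static inline int stamp_after(uint64_t t1, uint64_t t0)
{
	return t1 > t0;
}
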
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
@@ -616,6 +558,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
* @dst_pending_confirm: need to confirm neighbour
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
@@ -646,7 +589,7 @@ struct sk_buff {
union {
ktime_t tstamp;
- struct skb_mstamp skb_mstamp;
+ u64 skb_mstamp;
};
};
struct rb_node rbnode; /* used in netem & tcp stack */
@@ -744,7 +687,7 @@ struct sk_buff {
__u8 csum_valid:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
- __u8 csum_bad:1;
+ __u8 csum_not_inet:1;
__u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -915,6 +858,15 @@ static inline bool skb_pkt_type_ok(u32 ptype)
return ptype <= PACKET_OTHERHOST;
}
+static inline unsigned int skb_napi_id(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ return skb->napi_id;
+#else
+ return 0;
+#endif
+}
+
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
@@ -3056,6 +3008,13 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
const struct sk_buff *skb);
+struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
+ struct sk_buff_head *queue,
+ unsigned int flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb),
+ int *peeked, int *off, int *err,
+ struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
@@ -3129,6 +3088,8 @@ struct skb_checksum_ops {
__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};
+extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
+
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
@@ -3298,13 +3259,6 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps);
-static inline void sw_tx_timestamp(struct sk_buff *skb)
-{
- if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
- !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
- skb_tstamp_tx(skb, NULL);
-}
-
/**
* skb_tx_timestamp() - Driver hook for transmit timestamping
*
@@ -3320,7 +3274,8 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
skb_clone_tx_timestamp(skb);
- sw_tx_timestamp(skb);
+ if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
+ skb_tstamp_tx(skb, NULL);
}
/**
@@ -3386,21 +3341,6 @@ static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
}
}
-static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
-{
- /* Mark current checksum as bad (typically called from GRO
- * path). In the case that ip_summed is CHECKSUM_NONE
- * this must be the first checksum encountered in the packet.
- * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
- * checksum after the last one validated. For UDP, a zero
- * checksum can not be marked as bad.
- */
-
- if (skb->ip_summed == CHECKSUM_NONE ||
- skb->ip_summed == CHECKSUM_UNNECESSARY)
- skb->csum_bad = 1;
-}
-
/* Check if we need to perform checksum complete validation.
*
* Returns true if checksum complete is needed, false otherwise
@@ -3454,9 +3394,6 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
skb->csum_valid = 1;
return 0;
}
- } else if (skb->csum_bad) {
- /* ip_summed == CHECKSUM_NONE in this case */
- return (__force __sum16)1;
}
skb->csum = psum;
@@ -3516,8 +3453,7 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
- return (skb->ip_summed == CHECKSUM_NONE &&
- skb->csum_valid && !skb->csum_bad);
+ return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}
static inline void __skb_checksum_convert(struct sk_buff *skb,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b6d5adcee8fc..542ca1ae02c4 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -123,7 +123,7 @@ struct tcp_request_sock_ops;
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
- struct skb_mstamp snt_synack; /* first SYNACK sent time */
+ u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
u32 txhash;
u32 rcv_isn;
@@ -211,7 +211,7 @@ struct tcp_sock {
/* Information of the most recently (s)acked skb */
struct tcp_rack {
- struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+ u64 mstamp; /* (Re)sent time of the skb */
u32 rtt_us; /* Associated RTT */
u32 end_seq; /* Ending TCP sequence of the skb */
u8 advanced; /* mstamp advanced since last lost marking */
@@ -240,7 +240,7 @@ struct tcp_sock {
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
/* RTT measurement */
- struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */
+ u64 tcp_mstamp; /* most recent packet received/sent */
u32 srtt_us; /* smoothed round trip time << 3 in usecs */
u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
@@ -280,8 +280,8 @@ struct tcp_sock {
u32 delivered; /* Total data packets delivered incl. rexmits */
u32 lost; /* Total data packets lost incl. rexmits */
u32 app_limited; /* limited until "delivered" reaches this val */
- struct skb_mstamp first_tx_mstamp; /* start of window send phase */
- struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+ u64 first_tx_mstamp; /* start of window send phase */
+ u64 delivered_mstamp; /* time we reached "delivered" */
u32 rate_delivered; /* saved rate sample: packets delivered */
u32 rate_interval_us; /* saved rate sample: time elapsed */
@@ -293,6 +293,8 @@ struct tcp_sock {
u32 sacked_out; /* SACK'd packets */
u32 fackets_out; /* FACK'd packets */
+ struct hrtimer pacing_timer;
+
/* from STCP, retrans queue hinting */
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
@@ -333,16 +335,16 @@ struct tcp_sock {
/* Receiver side RTT estimation */
struct {
- u32 rtt_us;
- u32 seq;
- struct skb_mstamp time;
+ u32 rtt_us;
+ u32 seq;
+ u64 time;
} rcv_rtt_est;
/* Receiver queue space */
struct {
- int space;
- u32 seq;
- struct skb_mstamp time;
+ int space;
+ u32 seq;
+ u64 time;
} rcvq_space;
/* TCP-specific MTU probe information. */
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 6cb4061a720d..eaea63bc79bb 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -80,6 +80,9 @@ struct udp_sock {
struct sk_buff *skb,
int nhoff);
+	/* udp_recvmsg tries to use this before splicing sk_receive_queue */
+ struct sk_buff_head reader_queue ____cacheline_aligned_in_smp;
+
/* This field is dirtied by udp_recvmsg() */
int forward_deficit;
};
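
The new reader_queue deserves a gloss: udp_recvmsg() drains a queue private to the reader and only takes the shared sk_receive_queue lock to splice pending skbs across in one batch, so producers and the consumer stop contending on a lock per datagram. A minimal user-space sketch of the idea, with toy types standing in for sk_buff_head and FIFO bookkeeping elided:

#include <pthread.h>
#include <stddef.h>

struct pkt { struct pkt *next; };

struct two_q {
	pthread_spinlock_t lock;	/* guards only the shared list */
	struct pkt *shared;		/* producers push here under the lock */
	struct pkt *reader;		/* drained lock-free by the single reader */
};

/* Reader side: take the contended lock only when the private list runs
 * dry, then move the whole batch across in O(1).
 */
static struct pkt *two_q_recv(struct two_q *q)
{
	struct pkt *p;

	if (!q->reader) {
		pthread_spin_lock(&q->lock);
		q->reader = q->shared;
		q->shared = NULL;
		pthread_spin_unlock(&q->lock);
	}
	p = q->reader;
	if (p)
		q->reader = p->next;
	return p;
}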
diff --git a/include/net/act_api.h b/include/net/act_api.h
index cfa2ae33da9a..26ffd8333f50 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -42,6 +42,7 @@ struct tc_action {
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie *act_cookie;
+ struct tcf_chain *goto_chain;
};
#define tcf_head common.tcfa_head
#define tcf_index common.tcfa_index
@@ -180,12 +181,12 @@ int tcf_unregister_action(struct tc_action_ops *a,
int tcf_action_destroy(struct list_head *actions, int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
-int tcf_action_init(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *n, int ovr,
- int bind, struct list_head *);
-struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *n, int ovr,
- int bind);
+int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ struct nlattr *est, char *name, int ovr, int bind,
+ struct list_head *actions);
+struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind);
int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 99aa5e5e3100..fe98f0a5bef0 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -399,6 +399,7 @@ enum {
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
#define HCI_LE_EXT_SCAN_POLICY 0x80
+#define HCI_LE_CHAN_SEL_ALG2 0x40
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -1498,6 +1499,13 @@ struct hci_rp_le_read_max_data_len {
__le16 rx_time;
} __packed;
+#define HCI_OP_LE_SET_DEFAULT_PHY 0x2031
+struct hci_cp_le_set_default_phy {
+ __u8 all_phys;
+ __u8 tx_phys;
+ __u8 rx_phys;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 8e24677b1c62..c0e567c0c824 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/ethtool.h>
#include <net/devlink.h>
+#include <net/switchdev.h>
struct tc_action;
struct phy_device;
@@ -27,13 +28,13 @@ struct fixed_phy_status;
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = 0,
+ DSA_TAG_PROTO_BRCM,
DSA_TAG_PROTO_DSA,
- DSA_TAG_PROTO_TRAILER,
DSA_TAG_PROTO_EDSA,
- DSA_TAG_PROTO_BRCM,
- DSA_TAG_PROTO_QCA,
- DSA_TAG_PROTO_MTK,
DSA_TAG_PROTO_LAN9303,
+ DSA_TAG_PROTO_MTK,
+ DSA_TAG_PROTO_QCA,
+ DSA_TAG_PROTO_TRAILER,
DSA_TAG_LAST, /* MUST BE LAST */
};
@@ -137,10 +138,9 @@ struct dsa_switch_tree {
const struct ethtool_ops *master_orig_ethtool_ops;
/*
- * The switch and port to which the CPU is attached.
+ * The switch port to which the CPU is attached.
*/
- struct dsa_switch *cpu_switch;
- s8 cpu_port;
+ struct dsa_port *cpu_dp;
/*
* Data for the individual switch chips.
@@ -251,7 +251,7 @@ struct dsa_switch {
static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
{
- return !!(ds == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+ return ds->dst->cpu_dp == &ds->ports[p];
}
static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
@@ -279,28 +279,12 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
* Else return the (DSA) port number that connects to the
* switch that is one hop closer to the cpu.
*/
- if (dst->cpu_switch == ds)
- return dst->cpu_port;
+ if (dst->cpu_dp->ds == ds)
+ return dst->cpu_dp->index;
else
- return ds->rtable[dst->cpu_switch->index];
+ return ds->rtable[dst->cpu_dp->ds->index];
}
-struct switchdev_trans;
-struct switchdev_obj;
-struct switchdev_obj_port_fdb;
-struct switchdev_obj_port_mdb;
-struct switchdev_obj_port_vlan;
-
-#define DSA_NOTIFIER_BRIDGE_JOIN 1
-#define DSA_NOTIFIER_BRIDGE_LEAVE 2
-
-/* DSA_NOTIFIER_BRIDGE_* */
-struct dsa_notifier_bridge_info {
- struct net_device *br;
- int sw_index;
- int port;
-};
-
struct dsa_switch_ops {
/*
* Legacy probing.
@@ -410,7 +394,7 @@ struct dsa_switch_ops {
const struct switchdev_obj_port_vlan *vlan);
int (*port_vlan_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
/*
* Forwarding database
@@ -425,7 +409,7 @@ struct dsa_switch_ops {
const struct switchdev_obj_port_fdb *fdb);
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
/*
* Multicast database
@@ -440,7 +424,7 @@ struct dsa_switch_ops {
const struct switchdev_obj_port_mdb *mdb);
int (*port_mdb_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_mdb *mdb,
- int (*cb)(struct switchdev_obj *obj));
+ switchdev_obj_dump_cb_t *cb);
/*
* RXNFC
diff --git a/include/net/dst.h b/include/net/dst.h
index cfc043784166..1969008783d8 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -31,9 +31,9 @@
struct sk_buff;
struct dst_entry {
+ struct net_device *dev;
struct rcu_head rcu_head;
struct dst_entry *child;
- struct net_device *dev;
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 8d21d448daa9..efe34eec61dc 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -157,6 +157,14 @@ struct flow_dissector_key_eth_addrs {
unsigned char src[ETH_ALEN];
};
+/**
+ * struct flow_dissector_key_tcp:
+ * @flags: TCP header flags (lower 12 bits of the flag word)
+ */
+struct flow_dissector_key_tcp {
+ __be16 flags;
+};
+
enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -177,6 +185,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
+ FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
FLOW_DISSECTOR_KEY_MAX,
};
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 5894730ec82a..975779d0e7b0 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -92,7 +92,7 @@ struct inet_frags {
*/
u32 rnd;
seqlock_t rnd_seqlock;
- int qsize;
+ unsigned int qsize;
unsigned int (*hashfn)(const struct inet_frag_queue *);
bool (*match)(const struct inet_frag_queue *q,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index c979c878df1c..aa50e2e6fa2a 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -277,7 +277,8 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
void *arg);
int fib6_add(struct fib6_node *root, struct rt6_info *rt,
- struct nl_info *info, struct mx6_config *mxc);
+ struct nl_info *info, struct mx6_config *mxc,
+ struct netlink_ext_ack *extack);
int fib6_del(struct rt6_info *rt, struct nl_info *info);
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f5e625f53367..f3da9dd2a8db 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -90,7 +90,7 @@ void ip6_route_cleanup(void);
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-int ip6_route_add(struct fib6_config *cfg);
+int ip6_route_add(struct fib6_config *cfg, struct netlink_ext_ack *extack);
int ip6_ins_rt(struct rt6_info *);
int ip6_del_rt(struct rt6_info *);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f7f6aa789c61..dcbfd5dfd25e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -136,6 +136,7 @@ struct fib_rule;
struct fib_table;
struct fib_result {
+ __be32 prefix;
unsigned char prefixlen;
unsigned char nh_sel;
unsigned char type;
@@ -263,7 +264,8 @@ struct fib_table {
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
-int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
+int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
+ struct netlink_ext_ack *extack);
int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 269fd78bb0ae..f7762295b7b8 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -18,10 +18,31 @@ int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
#ifdef CONFIG_NET_CLS
-void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
+ bool create);
+void tcf_chain_put(struct tcf_chain *chain);
+int tcf_block_get(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain);
+void tcf_block_put(struct tcf_block *block);
+int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode);
+
#else
-static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+static inline
+int tcf_block_get(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain)
+{
+ return 0;
+}
+
+static inline void tcf_block_put(struct tcf_block *block)
+{
+}
+
+static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
{
+ return TC_ACT_UNSPEC;
}
#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index bec46f63f10c..2579c209ea51 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -113,9 +113,6 @@ static inline void qdisc_run(struct Qdisc *q)
__qdisc_run(q);
}
-int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode);
-
static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
{
/* We need to take extra care in case the skb came via
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index a12a5d25b27e..53ced67c4ae9 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -29,7 +29,7 @@ struct proto;
struct request_sock_ops {
int family;
- int obj_size;
+ unsigned int obj_size;
struct kmem_cache *slab;
char *slab_name;
int (*rtx_syn_ack)(const struct sock *sk,
diff --git a/include/net/route.h b/include/net/route.h
index 2cc0e14c6359..08e689f23365 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -113,13 +113,16 @@ struct in_device;
int ip_rt_init(void);
void rt_cache_flush(struct net *net);
void rt_flush_dev(struct net_device *dev);
-struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
- const struct sk_buff *skb);
+struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
+ const struct sk_buff *skb);
+struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp,
+ struct fib_result *res,
+ const struct sk_buff *skb);
static inline struct rtable *__ip_route_output_key(struct net *net,
struct flowi4 *flp)
{
- return __ip_route_output_key_hash(net, flp, NULL);
+ return ip_route_output_key_hash(net, flp, NULL);
}
struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
@@ -175,6 +178,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin);
+int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin,
+ struct fib_result *res);
static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 22e52093bfda..368850194c94 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -8,6 +8,7 @@
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
+#include <linux/list.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -153,7 +154,7 @@ struct Qdisc_class_ops {
void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
/* Filter manipulation */
- struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+ struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long);
bool (*tcf_cl_offload)(u32 classid);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
@@ -192,8 +193,13 @@ struct Qdisc_ops {
struct tcf_result {
- unsigned long class;
- u32 classid;
+ union {
+ struct {
+ unsigned long class;
+ u32 classid;
+ };
+ const struct tcf_proto *goto_tp;
+ };
};
struct tcf_proto_ops {
@@ -236,6 +242,7 @@ struct tcf_proto {
struct Qdisc *q;
void *data;
const struct tcf_proto_ops *ops;
+ struct tcf_chain *chain;
struct rcu_head rcu;
};
@@ -247,6 +254,19 @@ struct qdisc_skb_cb {
unsigned char data[QDISC_CB_PRIV_LEN];
};
+struct tcf_chain {
+ struct tcf_proto __rcu *filter_chain;
+ struct tcf_proto __rcu **p_filter_chain;
+ struct list_head list;
+ struct tcf_block *block;
+ u32 index; /* chain index */
+ unsigned int refcnt;
+};
+
+struct tcf_block {
+ struct list_head chain_list;
+};
+
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
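
The tcf_result union above is the plumbing for the goto-chain verdict added in pkt_cls.h later in this series: an action that returns TC_ACT_GOTO_CHAIN hands back the first tcf_proto of the target chain through res->goto_tp. A hedged sketch of how a classify loop could consume that verdict; the in-tree dispatch may differ in detail, this only shows the union's role:

#include <linux/pkt_cls.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

static int classify_following_chains(struct sk_buff *skb,
				     const struct tcf_proto *tp,
				     struct tcf_result *res)
{
	int verdict = tcf_classify(skb, tp, res, true);

	while (TC_ACT_EXT_CMP(verdict, TC_ACT_GOTO_CHAIN)) {
		tp = res->goto_tp;	/* set by the goto_chain action */
		verdict = tcf_classify(skb, tp, res, true);
	}
	return verdict;
}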
diff --git a/include/net/sock.h b/include/net/sock.h
index f33e3d134e0b..3467d9e89e7d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -253,6 +253,7 @@ struct sock_common {
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
* @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+ * @sk_pacing_status: Pacing status (requested, handled by sch_fq)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
* @sk_padding: unused element for alignment
@@ -396,7 +397,7 @@ struct sock {
__s32 sk_peek_off;
int sk_write_pending;
__u32 sk_dst_pending_confirm;
- /* Note: 32bit hole on 64bit arches */
+ u32 sk_pacing_status; /* see enum sk_pacing */
long sk_sndtimeo;
struct timer_list sk_timer;
__u32 sk_priority;
@@ -475,6 +476,12 @@ struct sock {
struct rcu_head sk_rcu;
};
+enum sk_pacing {
+ SK_PACING_NONE = 0,
+ SK_PACING_NEEDED = 1,
+ SK_PACING_FQ = 2,
+};
+
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
@@ -2035,8 +2042,8 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
-int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
- unsigned int flags,
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
+ struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb));
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38a7427ae902..82462db97183 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -519,7 +519,7 @@ static inline u32 tcp_cookie_time(void)
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-__u32 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
const struct net *net, const struct dst_entry *dst);
@@ -574,6 +574,7 @@ void tcp_fin(struct sock *sk);
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
+ hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
inet_csk_clear_xmit_timers(sk);
}
@@ -699,17 +700,61 @@ u32 __tcp_select_window(struct sock *sk);
void tcp_send_window_probe(struct sock *sk);
-/* TCP timestamps are only 32-bits, this causes a slight
- * complication on 64-bit systems since we store a snapshot
- * of jiffies in the buffer control blocks below. We decided
- * to use only the low 32-bits of jiffies and hide the ugly
- * casts with the following macro.
+/* TCP uses 32bit jiffies to save some space.
+ * Note that this is different from tcp_time_stamp, which
+ * was historically the same value until linux-4.13.
*/
-#define tcp_time_stamp ((__u32)(jiffies))
+#define tcp_jiffies32 ((u32)jiffies)
+
+/*
+ * Deliver a 32bit value for the TCP timestamp option (RFC 7323).
+ * It is no longer tied to jiffies, but to a 1 ms clock.
+ * Note: double check whether you want tcp_jiffies32 instead of this.
+ */
+#define TCP_TS_HZ 1000
+
+static inline u64 tcp_clock_ns(void)
+{
+ return local_clock();
+}
+
+static inline u64 tcp_clock_us(void)
+{
+ return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+}
+
+/* This should only be used in contexts where tp->tcp_mstamp is up to date */
+static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+{
+ return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
+static inline u32 tcp_time_stamp_raw(void)
+{
+ return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+
+/* Refresh the 1 us clock of a TCP socket,
+ * ensuring monotonically increasing values.
+ */
+static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
+{
+ u64 val = tcp_clock_us();
+
+ if (val > tp->tcp_mstamp)
+ tp->tcp_mstamp = val;
+}
+
+static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
+{
+ return max_t(s64, t1 - t0, 0);
+}
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return skb->skb_mstamp.stamp_jiffies;
+ return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}
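
The conversions above are easy to get wrong, so here is the same derivation in plain user-space C, assuming CLOCK_MONOTONIC stands in for local_clock(): microseconds for tp->tcp_mstamp, and a division by USEC_PER_SEC / TCP_TS_HZ = 1000 to get the 1 ms TSval.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TCP_TS_HZ     1000ULL		/* 1 ms timestamp clock */
#define NSEC_PER_USEC 1000ULL
#define USEC_PER_SEC  1000000ULL

static uint64_t clock_ns(void)		/* stand-in for local_clock() */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	uint64_t mstamp = clock_ns() / NSEC_PER_USEC;	/* tcp_clock_us() */
	uint32_t tsval = (uint32_t)(mstamp / (USEC_PER_SEC / TCP_TS_HZ));

	printf("tcp_mstamp=%llu us, TSval=%u\n",
	       (unsigned long long)mstamp, tsval);
	return 0;
}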
@@ -774,9 +819,9 @@ struct tcp_skb_cb {
/* pkts S/ACKed so far upon tx of skb, incl retrans: */
__u32 delivered;
/* start of send pipeline phase */
- struct skb_mstamp first_tx_mstamp;
+ u64 first_tx_mstamp;
/* when we reached the "delivered" count */
- struct skb_mstamp delivered_mstamp;
+ u64 delivered_mstamp;
} tx; /* only used for outgoing skbs */
union {
struct inet_skb_parm h4;
@@ -892,7 +937,7 @@ struct ack_sample {
* A sample is invalid if "delivered" or "interval_us" is negative.
*/
struct rate_sample {
- struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+ u64 prior_mstamp; /* starting timestamp for interval */
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
long interval_us; /* time for tp->delivered to incr "delivered" */
@@ -1241,7 +1286,7 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
ca_ops->cong_control)
return;
- delta = tcp_time_stamp - tp->lsndtime;
+ delta = tcp_jiffies32 - tp->lsndtime;
if (delta > inet_csk(sk)->icsk_rto)
tcp_cwnd_restart(sk, delta);
}
@@ -1303,8 +1348,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
const struct inet_connection_sock *icsk = &tp->inet_conn;
- return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
- tcp_time_stamp - tp->rcv_tstamp);
+ return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
+ tcp_jiffies32 - tp->rcv_tstamp);
}
static inline int tcp_fin_time(const struct sock *sk)
@@ -1858,7 +1903,7 @@ void tcp_init(void);
/* tcp_recovery.c */
extern void tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
- const struct skb_mstamp *xmit_time);
+ u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
/*
@@ -1945,4 +1990,6 @@ static inline void tcp_listendrop(const struct sock *sk)
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}
+enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
+
#endif /* _TCP_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 3391dbd73959..1468dbd0f09a 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -249,13 +249,8 @@ void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
-static inline struct sk_buff *
-__skb_recv_udp(struct sock *sk, unsigned int flags, int noblock, int *peeked,
- int *off, int *err)
-{
- return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
- udp_skb_destructor, peeked, off, err);
-}
+struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
+ int noblock, int *peeked, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
int noblock, int *err)
{
diff --git a/include/net/udplite.h b/include/net/udplite.h
index ea340524f99b..b7a18f63d86d 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -26,8 +26,8 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
/* Designate sk as UDP-Lite socket */
static inline int udplite_sk_init(struct sock *sk)
{
+ udp_init_sock(sk);
udp_sk(sk)->pcflag = UDPLITE_BIT;
- sk->sk_destruct = udp_destruct_sock;
return 0;
}
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 0cd4c11479b1..b3d1aff5e8ad 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -668,6 +668,10 @@ struct ucc_slow_pram {
#define UCC_FAST_GUMR_CTSS 0x00800000
#define UCC_FAST_GUMR_TXSY 0x00020000
#define UCC_FAST_GUMR_RSYN 0x00010000
+#define UCC_FAST_GUMR_SYNL_MASK 0x0000C000
+#define UCC_FAST_GUMR_SYNL_16 0x0000C000
+#define UCC_FAST_GUMR_SYNL_8 0x00008000
+#define UCC_FAST_GUMR_SYNL_AUTO 0x00004000
#define UCC_FAST_GUMR_RTSM 0x00002000
#define UCC_FAST_GUMR_REVD 0x00000400
#define UCC_FAST_GUMR_ENR 0x00000020
@@ -785,6 +789,11 @@ struct ucc_slow_pram {
#define UCC_GETH_UPSMR_SMM 0x00000080
#define UCC_GETH_UPSMR_SGMM 0x00000020
+/* UCC Protocol Specific Mode Register (UPSMR), when used for HDLC */
+#define UCC_HDLC_UPSMR_RTE 0x02000000
+#define UCC_HDLC_UPSMR_BUS 0x00200000
+#define UCC_HDLC_UPSMR_CW8 0x00007000
+
/* UCC Transmit On Demand Register (UTODR) */
#define UCC_SLOW_TOD 0x8000
#define UCC_FAST_TOD 0x8000
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 2b488565599d..a5f6e819fafd 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -100,4 +100,6 @@
#define SO_COOKIE 57
+#define SCM_TIMESTAMPING_PKTINFO 58
+
#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 464dcca5ed68..3d421d912193 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -9,6 +9,7 @@
#ifndef _NET_TIMESTAMPING_H
#define _NET_TIMESTAMPING_H
+#include <linux/types.h>
#include <linux/socket.h> /* for SO_TIMESTAMPING */
/* SO_TIMESTAMPING gets an integer bit field comprised of these values */
@@ -26,8 +27,10 @@ enum {
SOF_TIMESTAMPING_OPT_CMSG = (1<<10),
SOF_TIMESTAMPING_OPT_TSONLY = (1<<11),
SOF_TIMESTAMPING_OPT_STATS = (1<<12),
+ SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
+ SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_STATS,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
@@ -125,6 +128,16 @@ enum hwtstamp_rx_filters {
HWTSTAMP_FILTER_PTP_V2_SYNC,
/* PTP v2/802.AS1, any layer, Delay_req packet */
HWTSTAMP_FILTER_PTP_V2_DELAY_REQ,
+
+ /* NTP, UDP, all versions and packet modes */
+ HWTSTAMP_FILTER_NTP_ALL,
+};
+
+/* SCM_TIMESTAMPING_PKTINFO control message */
+struct scm_ts_pktinfo {
+ __u32 if_index;
+ __u32 pkt_length;
+ __u32 reserved[2];
};
#endif /* _NET_TIMESTAMPING_H */
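
From user space, the two new reporting bits are requested through the existing SO_TIMESTAMPING interface; scm_ts_pktinfo then arrives as a SCM_TIMESTAMPING_PKTINFO cmsg alongside SCM_TIMESTAMPING. A minimal sketch with error handling trimmed (the RX_HARDWARE/RAW_HARDWARE bits are pre-existing):

#include <linux/net_tstamp.h>
#include <sys/socket.h>

static int enable_timestamping(int fd)
{
	int val = SOF_TIMESTAMPING_RX_HARDWARE |
		  SOF_TIMESTAMPING_RAW_HARDWARE |
		  SOF_TIMESTAMPING_OPT_PKTINFO |	/* per-packet if_index/length */
		  SOF_TIMESTAMPING_OPT_TX_SWHW;		/* both sw and hw tx stamps */

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}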
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index d613be3b3239..c6e8cf5e9c40 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -51,6 +51,7 @@ enum {
(((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode)
#define TC_ACT_JUMP __TC_ACT_EXT(1)
+#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
/* Action type identifiers*/
enum {
@@ -450,6 +451,9 @@ enum {
TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */
TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */
+ TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */
+ TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */
+
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index cce061382e40..564790e854f7 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -278,6 +278,7 @@ enum rt_scope_t {
#define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */
#define RTM_F_PREFIX 0x800 /* Prefix addresses */
#define RTM_F_LOOKUP_TABLE 0x1000 /* set rtm_table to FIB lookup result */
+#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */
/* Reserved table identifiers */
@@ -549,6 +550,7 @@ enum {
TCA_STAB,
TCA_PAD,
TCA_DUMP_INVISIBLE,
+ TCA_CHAIN,
__TCA_MAX
};
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 42d0997e2fbb..8a8f77a247e6 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -733,7 +733,7 @@ void bt_procfs_cleanup(struct net *net, const char *name)
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);
-static struct net_proto_family bt_sock_family_ops = {
+static const struct net_proto_family bt_sock_family_ops = {
.owner = THIS_MODULE,
.family = PF_BLUETOOTH,
.create = bt_sock_create,
diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c
index 24d4e60f8c48..c7b1a9aee579 100644
--- a/net/bluetooth/ecdh_helper.c
+++ b/net/bluetooth/ecdh_helper.c
@@ -89,11 +89,9 @@ bool compute_ecdh_secret(const u8 public_key[64], const u8 private_key[32],
p.curve_id = ECC_CURVE_NIST_P256;
buf_len = crypto_ecdh_key_len(&p);
buf = kmalloc(buf_len, GFP_KERNEL);
- if (!buf) {
- pr_err("alg: kpp: Failed to allocate %d bytes for buf\n",
- buf_len);
+ if (!buf)
goto free_req;
- }
+
crypto_ecdh_encode_key(buf, buf_len, &p);
/* Set A private Key */
@@ -170,11 +168,8 @@ bool generate_ecdh_keys(u8 public_key[64], u8 private_key[32])
p.key_size = 32;
buf_len = crypto_ecdh_key_len(&p);
buf = kmalloc(buf_len, GFP_KERNEL);
- if (!buf) {
- pr_err("alg: kpp: Failed to allocate %d bytes for buf\n",
- buf_len);
+ if (!buf)
goto free_req;
- }
do {
if (tries++ >= max_tries)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 05686776a5fb..7655b4005dfb 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -148,13 +148,13 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
return -EINVAL;
/* When the diagnostic flags are not persistent and the transport
- * is not active, then there is no need for the vendor callback.
- *
- * Instead just store the desired value. If needed the setting
- * will be programmed when the controller gets powered on.
+ * is not active or in user channel operation, then there is no need
+ * for the vendor callback. Instead just store the desired value and
+ * the setting will be programmed when the controller gets powered on.
*/
if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
- !test_bit(HCI_RUNNING, &hdev->flags))
+ (!test_bit(HCI_RUNNING, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
goto done;
hci_req_sync_lock(hdev);
@@ -635,6 +635,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
* Report
*/
+ /* If the controller supports Channel Selection Algorithm #2
+ * feature, enable the corresponding event.
+ */
+ if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
+	events[2] |= 0x08;	/* LE Channel Selection Algorithm */
+
/* If the controller supports the LE Set Scan Enable command,
* enable the corresponding advertising report event.
*/
@@ -677,6 +685,12 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[34] & 0x04)
events[1] |= 0x01; /* LE Generate DHKey Complete */
+ /* If the controller supports the LE Set Default PHY or
+ * LE Set PHY commands, enable the corresponding event.
+ */
+ if (hdev->commands[35] & (0x20 | 0x40))
+ events[1] |= 0x08; /* LE PHY Update Complete */
+
hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
events);
@@ -771,6 +785,27 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
sizeof(support), &support);
}
+ /* Set Suggested Default Data Length to maximum if supported */
+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
+ struct hci_cp_le_write_def_data_len cp;
+
+ cp.tx_len = hdev->le_max_tx_len;
+ cp.tx_time = hdev->le_max_tx_time;
+ hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
+ }
+
+ /* Set Default PHY parameters if command is supported */
+ if (hdev->commands[35] & 0x20) {
+ struct hci_cp_le_set_default_phy cp;
+
+ /* No transmitter PHY or receiver PHY preferences */
+ cp.all_phys = 0x03;
+ cp.tx_phys = 0;
+ cp.rx_phys = 0;
+
+ hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
+ }
+
return 0;
}
@@ -1384,6 +1419,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
* completed.
*/
if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
ret = hdev->set_diag(hdev, true);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 7f8d05cf9065..f3aef22931ab 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -138,7 +138,7 @@ void br_manage_promisc(struct net_bridge *br)
/* If vlan filtering is disabled or bridge interface is placed
* into promiscuous mode, place all ports in promiscuous mode.
*/
- if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
+ if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
set_all = true;
list_for_each_entry(p, &br->port_list, list) {
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index b0845480a3ae..09dcdb9c0f3c 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -599,7 +599,7 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
vg = nbp_vlan_group(p);
- if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+ if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
err = __br_mdb_add(net, br, entry);
@@ -694,7 +694,7 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
vg = nbp_vlan_group(p);
- if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+ if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
err = __br_mdb_del(br, entry);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index faa7261a992f..8dc5c8d69bcd 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2176,6 +2176,14 @@ unlock:
return err;
}
+bool br_multicast_enabled(const struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ return !br->multicast_disabled;
+}
+EXPORT_SYMBOL_GPL(br_multicast_enabled);
+
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
unsigned long max_delay;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 574f78824d8a..1e63ec466d7c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1251,7 +1251,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
u32 stp_enabled = br->stp_enabled;
u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
- u8 vlan_enabled = br_vlan_enabled(br);
+ u8 vlan_enabled = br_vlan_enabled(br->dev);
u64 clockval;
clockval = br_timer_value(&br->hello_timer);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0d177280aa84..20626927f433 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -854,10 +854,6 @@ static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
return vg->pvid;
}
-static inline int br_vlan_enabled(struct net_bridge *br)
-{
- return br->vlan_enabled;
-}
#else
static inline bool br_allowed_ingress(const struct net_bridge *br,
struct net_bridge_vlan_group *vg,
@@ -945,11 +941,6 @@ static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
return 0;
}
-static inline int br_vlan_enabled(struct net_bridge *br)
-{
- return 0;
-}
-
static inline int __br_vlan_filter_toggle(struct net_bridge *br,
unsigned long val)
{
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 0db8102995a5..4efd5d54498a 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -150,7 +150,6 @@ static int br_stp_call_user(struct net_bridge *br, char *arg)
static void br_stp_start(struct net_bridge *br)
{
- struct net_bridge_port *p;
int err = -ENOENT;
if (net_eq(dev_net(br->dev), &init_net))
@@ -169,11 +168,6 @@ static void br_stp_start(struct net_bridge *br)
if (!err) {
br->stp_enabled = BR_USER_STP;
br_debug(br, "userspace STP started\n");
-
- /* Stop hello and hold timers */
- del_timer(&br->hello_timer);
- list_for_each_entry(p, &br->port_list, list)
- del_timer(&p->hold_timer);
} else {
br->stp_enabled = BR_KERNEL_STP;
br_debug(br, "using kernel STP\n");
@@ -188,7 +182,6 @@ static void br_stp_start(struct net_bridge *br)
static void br_stp_stop(struct net_bridge *br)
{
- struct net_bridge_port *p;
int err;
if (br->stp_enabled == BR_USER_STP) {
@@ -197,10 +190,6 @@ static void br_stp_stop(struct net_bridge *br)
br_err(br, "failed to stop userspace STP (%d)\n", err);
/* To start timers on any ports left in blocking */
- mod_timer(&br->hello_timer, jiffies + br->hello_time);
- list_for_each_entry(p, &br->port_list, list)
- mod_timer(&p->hold_timer,
- round_jiffies(jiffies + BR_HOLD_TIME));
spin_lock_bh(&br->lock);
br_port_state_selection(br);
spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index b838213c408e..26a1a56639b2 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -706,6 +706,14 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
return __br_vlan_filter_toggle(br, val);
}
+bool br_vlan_enabled(const struct net_device *dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ return !!br->vlan_enabled;
+}
+EXPORT_SYMBOL_GPL(br_vlan_enabled);
+
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
int err = 0;
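
Together with br_multicast_enabled() above, this gives drivers an accessor keyed on the bridge net_device rather than the private struct net_bridge the old inline helpers required. A hypothetical driver-side caller, assuming br_dev is a bridge master device:

#include <linux/if_bridge.h>
#include <linux/netdevice.h>

static void example_sync_bridge_state(struct net_device *br_dev)
{
	bool vlan_filtering = br_vlan_enabled(br_dev);
	bool mcast_snooping = br_multicast_enabled(br_dev);

	netdev_dbg(br_dev, "vlan_filtering=%d mcast_snooping=%d\n",
		   vlan_filtering, mcast_snooping);
}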
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 346ef6b00b8f..c16dd3a47fc6 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -111,7 +111,7 @@ static void nft_reject_br_send_v4_unreach(struct net *net,
__wsum csum;
u8 proto;
- if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
+ if (!nft_bridge_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
@@ -226,9 +226,6 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
__be16 fo;
u8 proto = ip6h->nexthdr;
- if (skb->csum_bad)
- return false;
-
if (skb_csum_unnecessary(skb))
return true;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index adcad344c843..4674d17e7c08 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -1099,7 +1099,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
}
-static struct net_proto_family caif_family_ops = {
+static const struct net_proto_family caif_family_ops = {
.family = PF_CAIF,
.create = caif_create,
.owner = THIS_MODULE,
diff --git a/net/core/datagram.c b/net/core/datagram.c
index db1866f2ffcf..bc46118486fe 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -161,6 +161,45 @@ done:
return skb;
}
+struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
+ struct sk_buff_head *queue,
+ unsigned int flags,
+ void (*destructor)(struct sock *sk,
+ struct sk_buff *skb),
+ int *peeked, int *off, int *err,
+ struct sk_buff **last)
+{
+ struct sk_buff *skb;
+ int _off = *off;
+
+ *last = queue->prev;
+ skb_queue_walk(queue, skb) {
+ if (flags & MSG_PEEK) {
+ if (_off >= skb->len && (skb->len || _off ||
+ skb->peeked)) {
+ _off -= skb->len;
+ continue;
+ }
+ if (!skb->len) {
+ skb = skb_set_peeked(skb);
+ if (unlikely(IS_ERR(skb))) {
+ *err = PTR_ERR(skb);
+ return NULL;
+ }
+ }
+ *peeked = 1;
+ atomic_inc(&skb->users);
+ } else {
+ __skb_unlink(skb, queue);
+ if (destructor)
+ destructor(sk, skb);
+ }
+ *off = _off;
+ return skb;
+ }
+ return NULL;
+}
+
/**
* __skb_try_recv_datagram - Receive a datagram skbuff
* @sk: socket
@@ -222,40 +261,14 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
- int _off = *off;
-
- *last = (struct sk_buff *)queue;
spin_lock_irqsave(&queue->lock, cpu_flags);
- skb_queue_walk(queue, skb) {
- *last = skb;
- if (flags & MSG_PEEK) {
- if (_off >= skb->len && (skb->len || _off ||
- skb->peeked)) {
- _off -= skb->len;
- continue;
- }
- if (!skb->len) {
- skb = skb_set_peeked(skb);
- if (IS_ERR(skb)) {
- error = PTR_ERR(skb);
- spin_unlock_irqrestore(&queue->lock,
- cpu_flags);
- goto no_packet;
- }
- }
- *peeked = 1;
- atomic_inc(&skb->users);
- } else {
- __skb_unlink(skb, queue);
- if (destructor)
- destructor(sk, skb);
- }
- spin_unlock_irqrestore(&queue->lock, cpu_flags);
- *off = _off;
- return skb;
- }
-
+ skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
+ peeked, off, &error, last);
spin_unlock_irqrestore(&queue->lock, cpu_flags);
+ if (error)
+ goto no_packet;
+ if (skb)
+ return skb;
if (!sk_can_busy_loop(sk))
break;
@@ -335,8 +348,8 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
}
EXPORT_SYMBOL(__skb_free_datagram_locked);
-int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
- unsigned int flags,
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
+ struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb))
{
@@ -344,15 +357,15 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
if (flags & MSG_PEEK) {
err = -ENOENT;
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (skb == skb_peek(&sk->sk_receive_queue)) {
- __skb_unlink(skb, &sk->sk_receive_queue);
+ spin_lock_bh(&sk_queue->lock);
+ if (skb == skb_peek(sk_queue)) {
+ __skb_unlink(skb, sk_queue);
atomic_dec(&skb->users);
if (destructor)
destructor(sk, skb);
err = 0;
}
- spin_unlock_bh(&sk->sk_receive_queue.lock);
+ spin_unlock_bh(&sk_queue->lock);
}
atomic_inc(&sk->sk_drops);
@@ -383,7 +396,8 @@ EXPORT_SYMBOL(__sk_queue_drop_skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
- int err = __sk_queue_drop_skb(sk, skb, flags, NULL);
+ int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
+ NULL);
kfree_skb(skb);
sk_mem_reclaim_partial(sk);
diff --git a/net/core/dev.c b/net/core/dev.c
index fca407b4a6ea..3d98fbf4cbb0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -105,6 +105,7 @@
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
@@ -142,6 +143,7 @@
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
+#include <linux/sctp.h>
#include "net-sysfs.h"
@@ -161,6 +163,7 @@ static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
struct net_device *dev,
struct netdev_notifier_info *info);
+static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -865,6 +868,31 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
EXPORT_SYMBOL(dev_get_by_index);
/**
+ * dev_get_by_napi_id - find a device by napi_id
+ * @napi_id: ID of the NAPI struct
+ *
+ * Search for an interface by NAPI ID. Returns a pointer to the
+ * device if found, %NULL otherwise. The device's reference counter
+ * is not incremented, so the caller must be careful about locking
+ * and must hold the RCU read lock.
+ */
+
+struct net_device *dev_get_by_napi_id(unsigned int napi_id)
+{
+ struct napi_struct *napi;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (napi_id < MIN_NAPI_ID)
+ return NULL;
+
+ napi = napi_by_id(napi_id);
+
+ return napi ? napi->dev : NULL;
+}
+EXPORT_SYMBOL(dev_get_by_napi_id);
+
+/**
* netdev_get_name - get a netdevice name, knowing its ifindex.
* @net: network namespace
* @name: a pointer to the buffer where the name will be stored.
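
A sketch of the calling convention for dev_get_by_napi_id(): RCU read lock held, no reference taken, so copy out what you need before unlocking.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static int ifindex_from_napi_id(unsigned int napi_id)
{
	struct net_device *dev;
	int ifindex = 0;

	rcu_read_lock();
	dev = dev_get_by_napi_id(napi_id);
	if (dev)
		ifindex = dev->ifindex;
	rcu_read_unlock();

	return ifindex;
}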
@@ -2611,6 +2639,47 @@ out:
}
EXPORT_SYMBOL(skb_checksum_help);
+int skb_crc32c_csum_help(struct sk_buff *skb)
+{
+ __le32 crc32c_csum;
+ int ret = 0, offset, start;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto out;
+
+ if (unlikely(skb_is_gso(skb)))
+ goto out;
+
+	/* Before computing a checksum, we should make sure no frag could
+	 * be modified by an external entity: the checksum could be wrong.
+ */
+ if (unlikely(skb_has_shared_frag(skb))) {
+ ret = __skb_linearize(skb);
+ if (ret)
+ goto out;
+ }
+ start = skb_checksum_start_offset(skb);
+ offset = start + offsetof(struct sctphdr, checksum);
+ if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (skb_cloned(skb) &&
+ !skb_clone_writable(skb, offset + sizeof(__le32))) {
+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (ret)
+ goto out;
+ }
+ crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
+ skb->len - start, ~(__u32)0,
+ crc32c_csum_stub));
+ *(__le32 *)(skb->data + offset) = crc32c_csum;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_not_inet = 0;
+out:
+ return ret;
+}
+
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
__be16 type = skb->protocol;
@@ -2953,6 +3022,17 @@ static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
return skb;
}
+int skb_csum_hwoffload_help(struct sk_buff *skb,
+ const netdev_features_t features)
+{
+ if (unlikely(skb->csum_not_inet))
+ return !!(features & NETIF_F_SCTP_CRC) ? 0 :
+ skb_crc32c_csum_help(skb);
+
+ return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
+}
+EXPORT_SYMBOL(skb_csum_hwoffload_help);
+
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
netdev_features_t features;
@@ -2991,8 +3071,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
else
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
- if (!(features & NETIF_F_CSUM_MASK) &&
- skb_checksum_help(skb))
+ if (skb_csum_hwoffload_help(skb, features))
goto out_kfree_skb;
}
}
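
The dispatch in skb_csum_hwoffload_help() reduces to a two-way decision; stripped of the skb details it looks like the toy below (bit positions illustrative, not the real NETIF_F_* values).

#include <stdbool.h>
#include <stdint.h>

#define NETIF_F_SCTP_CRC  (1u << 0)	/* device can offload CRC32c */
#define NETIF_F_CSUM_MASK (1u << 1)	/* device can offload inet csum */

/* 0: leave CHECKSUM_PARTIAL for the NIC; nonzero: resolve in software,
 * via the CRC32c helper for SCTP or skb_checksum_help() otherwise.
 */
static int csum_hwoffload_decision(bool csum_not_inet, uint32_t features)
{
	if (csum_not_inet)
		return (features & NETIF_F_SCTP_CRC) ? 0 : 1;
	return (features & NETIF_F_CSUM_MASK) ? 0 : 1;
}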
@@ -3178,7 +3257,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
qdisc_bstats_cpu_update(cl->q, skb);
- switch (tc_classify(skb, cl, &cl_res, false)) {
+ switch (tcf_classify(skb, cl, &cl_res, false)) {
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid);
@@ -3948,7 +4027,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
skb->tc_at_ingress = 1;
qdisc_bstats_cpu_update(cl->q, skb);
- switch (tc_classify(skb, cl, &cl_res, false)) {
+ switch (tcf_classify(skb, cl, &cl_res, false)) {
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid);
@@ -4636,9 +4715,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
if (netif_elide_gro(skb->dev))
goto normal;
- if (skb->csum_bad)
- goto normal;
-
gro_list_prepare(napi, skb);
rcu_read_lock();
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b94b1d293506..77f04e71100f 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -225,6 +225,7 @@ static int net_hwtstamp_validate(struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
rx_filter_valid = 1;
break;
}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 28d94bce4df8..5a45943081f5 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -18,6 +18,7 @@
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
+#include <linux/tcp.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
@@ -342,6 +343,30 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
}
+static void
+__skb_flow_dissect_tcp(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container, void *data, int thoff, int hlen)
+{
+ struct flow_dissector_key_tcp *key_tcp;
+ struct tcphdr *th, _th;
+
+ if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
+ return;
+
+ th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
+ if (!th)
+ return;
+
+ if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
+ return;
+
+ key_tcp = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ target_container);
+ key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
+}
+
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -683,6 +708,10 @@ ip_proto_again:
case IPPROTO_MPLS:
proto = htons(ETH_P_MPLS_UC);
goto mpls;
+ case IPPROTO_TCP:
+ __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
+ data, nhoff, hlen);
+ break;
default:
break;
}
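
For reference, the 16-bit word the dissector masks sits at byte offset 12 of the TCP header (data offset, reserved bits, then the flags); a stand-alone equivalent of the extraction:

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Same math as __skb_flow_dissect_tcp(): load the flag word at offset
 * 12 and keep the low 12 bits (NS..FIN), dropping the data-offset nibble.
 */
static uint16_t tcp_flags_word(const uint8_t *th, size_t th_len)
{
	uint16_t word;

	if (th_len < 14)
		return 0;
	memcpy(&word, th + 12, sizeof(word));
	return word & htons(0x0FFF);
}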
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 65ea0ff4017c..58e6cc70500d 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -323,7 +323,11 @@ NETDEVICE_SHOW_RW(flags, fmt_hex);
static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
- int res, orig_len = dev->tx_queue_len;
+ unsigned int orig_len = dev->tx_queue_len;
+ int res;
+
+ if (new_len != (unsigned int)new_len)
+ return -ERANGE;
if (new_len != orig_len) {
dev->tx_queue_len = new_len;
@@ -349,7 +353,7 @@ static ssize_t tx_queue_len_store(struct device *dev,
return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
-NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
+NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);
static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9e2c0a7cb325..64953af4a3b1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2048,8 +2048,8 @@ static int do_setlink(const struct sk_buff *skb,
}
if (tb[IFLA_TXQLEN]) {
- unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]);
- unsigned long orig_len = dev->tx_queue_len;
+ unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
+ unsigned int orig_len = dev->tx_queue_len;
if (dev->tx_queue_len ^ value) {
dev->tx_queue_len = value;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 346d3e85dfbc..780b7c1563d0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2243,6 +2243,32 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
+static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
+{
+ net_warn_ratelimited(
+ "%s: attempt to compute crc32c without libcrc32c.ko\n",
+ __func__);
+ return 0;
+}
+
+static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
+ int offset, int len)
+{
+ net_warn_ratelimited(
+ "%s: attempt to compute crc32c without libcrc32c.ko\n",
+ __func__);
+ return 0;
+}
+
+static const struct skb_checksum_ops default_crc32c_ops = {
+ .update = warn_crc32c_csum_update,
+ .combine = warn_crc32c_csum_combine,
+};
+
+const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
+ &default_crc32c_ops;
+EXPORT_SYMBOL(crc32c_csum_stub);
+
/**
* skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
* @from: source buffer
@@ -3875,6 +3901,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
if (!sk)
return;
+ if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
+ skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
+ return;
+
tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
if (!skb_may_tx_timestamp(sk, tsonly))
return;
diff --git a/net/core/sock.c b/net/core/sock.c
index 727f924b7f91..bef844127e01 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1038,6 +1038,10 @@ set_rcvbuf:
#endif
case SO_MAX_PACING_RATE:
+ if (val != ~0U)
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
sk->sk_max_pacing_rate = val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
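
From user space nothing changes syntactically; setting any cap other than ~0U now also flips sk_pacing_status from SK_PACING_NONE to SK_PACING_NEEDED, so TCP's internal pacing hrtimer can take over when no fq qdisc is attached. A sketch (the fallback define covers older uapi headers):

#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* asm-generic value */
#endif

static int cap_pacing_rate(int fd, unsigned int bytes_per_sec)
{
	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			  &bytes_per_sec, sizeof(bytes_per_sec));
}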
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 93106120f987..733f523707ac 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -178,10 +178,6 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
};
-static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
- [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
-};
-
/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
@@ -1463,8 +1459,15 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
struct dcb_app *app_data;
+
if (nla_type(attr) != DCB_ATTR_IEEE_APP)
continue;
+
+ if (nla_len(attr) < sizeof(struct dcb_app)) {
+ err = -ERANGE;
+ goto err;
+ }
+
app_data = nla_data(attr);
if (ops->ieee_setapp)
err = ops->ieee_setapp(netdev, app_data);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 5e3a7302f774..e1295d5f2c56 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -233,7 +233,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
struct dccp_sock *dp = dccp_sk(sk);
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- const u32 now = ccid2_time_stamp;
+ const u32 now = ccid2_jiffies32;
struct ccid2_seq *next;
/* slow-start after idle periods (RFC 2581, RFC 2861) */
@@ -466,7 +466,7 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
* The cleanest solution is to not use the ccid2s_sent field at all
* and instead use DCCP timestamps: requires changes in other places.
*/
- ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
+ ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
}
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
@@ -478,7 +478,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
return;
}
- hc->tx_last_cong = ccid2_time_stamp;
+ hc->tx_last_cong = ccid2_jiffies32;
hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
@@ -731,7 +731,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
hc->tx_rto = DCCP_TIMEOUT_INIT;
hc->tx_rpdupack = -1;
- hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp;
+ hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32;
hc->tx_cwnd_used = 0;
setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
(unsigned long)sk);
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 18c97543e522..6e50ef2898fb 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -27,7 +27,7 @@
* CCID-2 timestamping faces the same issues as TCP timestamping.
* Hence we reuse/share as much of the code as possible.
*/
-#define ccid2_time_stamp tcp_time_stamp
+#define ccid2_jiffies32 ((u32)jiffies)
/* NUMDUPACK parameter from RFC 4341, p. 6 */
#define NUMDUPACK 3
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 81a0868edb1d..297389b2ab35 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -25,16 +25,16 @@ config NET_DSA_TAG_DSA
config NET_DSA_TAG_EDSA
bool
-config NET_DSA_TAG_TRAILER
+config NET_DSA_TAG_LAN9303
bool
-config NET_DSA_TAG_QCA
+config NET_DSA_TAG_MTK
bool
-config NET_DSA_TAG_MTK
+config NET_DSA_TAG_TRAILER
bool
-config NET_DSA_TAG_LAN9303
+config NET_DSA_TAG_QCA
bool
endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 0b747d75e65a..90e5aa6f7d0f 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,12 +1,12 @@
# the core
obj-$(CONFIG_NET_DSA) += dsa_core.o
-dsa_core-y += dsa.o slave.o dsa2.o switch.o legacy.o
+dsa_core-y += dsa.o dsa2.o legacy.o port.o slave.o switch.o
# tagging formats
dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
-dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
-dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
-dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
+dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
+dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 26130ae438da..3288a80d4d6c 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -24,7 +24,7 @@
#include <linux/phy_fixed.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
@@ -40,26 +40,26 @@ static const struct dsa_device_ops none_ops = {
};
const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
+#ifdef CONFIG_NET_DSA_TAG_BRCM
+ [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
+#endif
#ifdef CONFIG_NET_DSA_TAG_DSA
[DSA_TAG_PROTO_DSA] = &dsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
[DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
#endif
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
- [DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
+#ifdef CONFIG_NET_DSA_TAG_LAN9303
+ [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
#endif
-#ifdef CONFIG_NET_DSA_TAG_BRCM
- [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
+#ifdef CONFIG_NET_DSA_TAG_MTK
+ [DSA_TAG_PROTO_MTK] = &mtk_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_QCA
[DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
#endif
-#ifdef CONFIG_NET_DSA_TAG_MTK
- [DSA_TAG_PROTO_MTK] = &mtk_netdev_ops,
-#endif
-#ifdef CONFIG_NET_DSA_TAG_LAN9303
- [DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ [DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
#endif
[DSA_TAG_PROTO_NONE] = &none_ops,
};
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 033b3bfb63dc..4301f52e4f5a 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -18,7 +18,7 @@
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
static LIST_HEAD(dsa_switch_trees);
@@ -443,8 +443,8 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst)
return err;
}
- if (dst->cpu_switch) {
- err = dsa_cpu_port_ethtool_setup(dst->cpu_switch);
+ if (dst->cpu_dp) {
+ err = dsa_cpu_port_ethtool_setup(dst->cpu_dp->ds);
if (err)
return err;
}
@@ -484,8 +484,8 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
dsa_ds_unapply(dst, ds);
}
- if (dst->cpu_switch)
- dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+ if (dst->cpu_dp)
+ dsa_cpu_port_ethtool_restore(dst->cpu_dp->ds);
pr_info("DSA: tree %d unapplied\n", dst->tree);
dst->applied = false;
@@ -518,10 +518,8 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
if (!dst->master_netdev)
dst->master_netdev = ethernet_dev;
- if (!dst->cpu_switch) {
- dst->cpu_switch = ds;
- dst->cpu_port = index;
- }
+ if (!dst->cpu_dp)
+ dst->cpu_dp = port;
tag_protocol = ds->ops->get_tag_protocol(ds);
dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index f4a88e485213..c1d4180651af 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -14,6 +14,56 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
+#include <net/dsa.h>
+
+enum {
+ DSA_NOTIFIER_AGEING_TIME,
+ DSA_NOTIFIER_BRIDGE_JOIN,
+ DSA_NOTIFIER_BRIDGE_LEAVE,
+ DSA_NOTIFIER_FDB_ADD,
+ DSA_NOTIFIER_FDB_DEL,
+ DSA_NOTIFIER_MDB_ADD,
+ DSA_NOTIFIER_MDB_DEL,
+ DSA_NOTIFIER_VLAN_ADD,
+ DSA_NOTIFIER_VLAN_DEL,
+};
+
+/* DSA_NOTIFIER_AGEING_TIME */
+struct dsa_notifier_ageing_time_info {
+ struct switchdev_trans *trans;
+ unsigned int ageing_time;
+};
+
+/* DSA_NOTIFIER_BRIDGE_* */
+struct dsa_notifier_bridge_info {
+ struct net_device *br;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_FDB_* */
+struct dsa_notifier_fdb_info {
+ const struct switchdev_obj_port_fdb *fdb;
+ struct switchdev_trans *trans;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_MDB_* */
+struct dsa_notifier_mdb_info {
+ const struct switchdev_obj_port_mdb *mdb;
+ struct switchdev_trans *trans;
+ int sw_index;
+ int port;
+};
+
+/* DSA_NOTIFIER_VLAN_* */
+struct dsa_notifier_vlan_info {
+ const struct switchdev_obj_port_vlan *vlan;
+ struct switchdev_trans *trans;
+ int sw_index;
+ int port;
+};
struct dsa_device_ops {
struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -59,6 +109,39 @@ void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds);
int dsa_legacy_register(void);
void dsa_legacy_unregister(void);
+/* port.c */
+int dsa_port_set_state(struct dsa_port *dp, u8 state,
+ struct switchdev_trans *trans);
+void dsa_port_set_state_now(struct dsa_port *dp, u8 state);
+int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
+void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
+int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
+ struct switchdev_trans *trans);
+int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
+ struct switchdev_trans *trans);
+int dsa_port_fdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans);
+int dsa_port_fdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_fdb *fdb);
+int dsa_port_fdb_dump(struct dsa_port *dp, struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb);
+int dsa_port_mdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+int dsa_port_mdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb);
+int dsa_port_mdb_dump(struct dsa_port *dp, struct switchdev_obj_port_mdb *mdb,
+ switchdev_obj_dump_cb_t *cb);
+int dsa_port_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans);
+int dsa_port_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan);
+int dsa_port_vlan_dump(struct dsa_port *dp,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb);
+
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
@@ -75,25 +158,25 @@ void dsa_slave_unregister_notifier(void);
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
+/* tag_brcm.c */
+extern const struct dsa_device_ops brcm_netdev_ops;
+
/* tag_dsa.c */
extern const struct dsa_device_ops dsa_netdev_ops;
/* tag_edsa.c */
extern const struct dsa_device_ops edsa_netdev_ops;
-/* tag_trailer.c */
-extern const struct dsa_device_ops trailer_netdev_ops;
+/* tag_lan9303.c */
+extern const struct dsa_device_ops lan9303_netdev_ops;
-/* tag_brcm.c */
-extern const struct dsa_device_ops brcm_netdev_ops;
+/* tag_mtk.c */
+extern const struct dsa_device_ops mtk_netdev_ops;
/* tag_qca.c */
extern const struct dsa_device_ops qca_netdev_ops;
-/* tag_mtk.c */
-extern const struct dsa_device_ops mtk_netdev_ops;
-
-/* tag_lan9303.c */
-extern const struct dsa_device_ops lan9303_netdev_ops;
+/* tag_trailer.c */
+extern const struct dsa_device_ops trailer_netdev_ops;
#endif
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index ad345c8b0b06..ac4379b8d7ac 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -22,7 +22,7 @@
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/etherdevice.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
/* switch driver registration ***********************************************/
@@ -115,13 +115,12 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
continue;
if (!strcmp(name, "cpu")) {
- if (dst->cpu_switch) {
+ if (dst->cpu_dp) {
netdev_err(dst->master_netdev,
"multiple cpu ports?!\n");
return -EINVAL;
}
- dst->cpu_switch = ds;
- dst->cpu_port = i;
+ dst->cpu_dp = &ds->ports[i];
ds->cpu_port_mask |= 1 << i;
} else if (!strcmp(name, "dsa")) {
ds->dsa_port_mask |= 1 << i;
@@ -144,7 +143,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
* tagging protocol to the preferred tagging format of this
* switch.
*/
- if (dst->cpu_switch == ds) {
+ if (dst->cpu_dp->ds == ds) {
enum dsa_tag_protocol tag_protocol;
tag_protocol = ops->get_tag_protocol(ds);
@@ -624,7 +623,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
dst->pd = pd;
dst->master_netdev = dev;
- dst->cpu_port = -1;
for (i = 0; i < pd->nr_chips; i++) {
struct dsa_switch *ds;
@@ -735,7 +733,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
dsa_switch_destroy(ds);
}
- dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+ dsa_cpu_port_ethtool_restore(dst->cpu_dp->ds);
dev_put(dst->master_netdev);
}
diff --git a/net/dsa/port.c b/net/dsa/port.c
new file mode 100644
index 000000000000..efc3bce3a89d
--- /dev/null
+++ b/net/dsa/port.c
@@ -0,0 +1,259 @@
+/*
+ * Handling of a single switch port
+ *
+ * Copyright (c) 2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/if_bridge.h>
+#include <linux/notifier.h>
+
+#include "dsa_priv.h"
+
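+/* Broadcast an event to all switches of the fabric through the tree's raw
+ * notifier chain, and convert the notifier return value into a plain errno.
+ */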
+static int dsa_port_notify(struct dsa_port *dp, unsigned long e, void *v)
+{
+ struct raw_notifier_head *nh = &dp->ds->dst->nh;
+ int err;
+
+ err = raw_notifier_call_chain(nh, e, v);
+
+ return notifier_to_errno(err);
+}
+
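+/* Apply a bridge STP state to a switch port. The prepare phase only checks
+ * that the operation is supported; the commit phase programs the state and
+ * fast-ages stale FDB entries when leaving the Learning/Forwarding states.
+ */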
+int dsa_port_set_state(struct dsa_port *dp, u8 state,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
+
+ if (ds->ops->port_stp_state_set)
+ ds->ops->port_stp_state_set(ds, port, state);
+
+ if (ds->ops->port_fast_age) {
+ /* Fast age FDB entries or flush the appropriate forwarding database
+ * for the given port if we are moving it from the Learning or
+ * Forwarding state to the Disabled, Blocking or Listening state.
+ */
+
+ if ((dp->stp_state == BR_STATE_LEARNING ||
+ dp->stp_state == BR_STATE_FORWARDING) &&
+ (state == BR_STATE_DISABLED ||
+ state == BR_STATE_BLOCKING ||
+ state == BR_STATE_LISTENING))
+ ds->ops->port_fast_age(ds, port);
+ }
+
+ dp->stp_state = state;
+
+ return 0;
+}
+
+void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
+{
+ int err;
+
+ err = dsa_port_set_state(dp, state, NULL);
+ if (err)
+ pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
+}
+
+int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
+{
+ struct dsa_notifier_bridge_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .br = br,
+ };
+ int err;
+
+ /* Here the port is already bridged. Reflect the current configuration
+ * so that drivers can program their chips accordingly.
+ */
+ dp->bridge_dev = br;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_BRIDGE_JOIN, &info);
+
+ /* The bridging is rolled back on error */
+ if (err)
+ dp->bridge_dev = NULL;
+
+ return err;
+}
+
+void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
+{
+ struct dsa_notifier_bridge_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .br = br,
+ };
+ int err;
+
+ /* Here the port is already unbridged. Reflect the current configuration
+ * so that drivers can program their chips accordingly.
+ */
+ dp->bridge_dev = NULL;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_BRIDGE_LEAVE, &info);
+ if (err)
+ pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
+
+ /* The port left the bridge and was put in BR_STATE_DISABLED by the
+ * bridge layer, so put it in BR_STATE_FORWARDING to keep it functional
+ * as a standalone port.
+ */
+ dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+}
+
+int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
+ struct switchdev_trans *trans)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (ds->ops->port_vlan_filtering)
+ return ds->ops->port_vlan_filtering(ds, dp->index,
+ vlan_filtering);
+
+ return 0;
+}
+
+int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
+ struct switchdev_trans *trans)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
+ unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
+ struct dsa_notifier_ageing_time_info info = {
+ .ageing_time = ageing_time,
+ .trans = trans,
+ };
+
+ if (switchdev_trans_ph_prepare(trans))
+ return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
+
+ dp->ageing_time = ageing_time;
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
+}
+
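+/* The add/del helpers below only package the switchdev object into a
+ * notifier payload; the chip itself is programmed by the DSA_NOTIFIER_*
+ * handlers in switch.c. The dump helpers call the driver directly.
+ */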
+int dsa_port_fdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct dsa_notifier_fdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .trans = trans,
+ .fdb = fdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
+}
+
+int dsa_port_fdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct dsa_notifier_fdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .fdb = fdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
+}
+
+int dsa_port_fdb_dump(struct dsa_port *dp, struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->port_fdb_dump)
+ return ds->ops->port_fdb_dump(ds, dp->index, fdb, cb);
+
+ return -EOPNOTSUPP;
+}
+
+int dsa_port_mdb_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct dsa_notifier_mdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .trans = trans,
+ .mdb = mdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
+}
+
+int dsa_port_mdb_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct dsa_notifier_mdb_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .mdb = mdb,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
+}
+
+int dsa_port_mdb_dump(struct dsa_port *dp, struct switchdev_obj_port_mdb *mdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->port_mdb_dump)
+ return ds->ops->port_mdb_dump(ds, dp->index, mdb, cb);
+
+ return -EOPNOTSUPP;
+}
+
+int dsa_port_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct dsa_notifier_vlan_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .trans = trans,
+ .vlan = vlan,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
+}
+
+int dsa_port_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_notifier_vlan_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .vlan = vlan,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
+}
+
+int dsa_port_vlan_dump(struct dsa_port *dp,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->port_vlan_dump)
+ return ds->ops->port_vlan_dump(ds, dp->index, vlan, cb);
+
+ return -EOPNOTSUPP;
+}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7693182df81e..887e26695519 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -17,28 +17,16 @@
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <linux/list.h>
-#include <net/dsa.h>
#include <net/rtnetlink.h>
-#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
+
#include "dsa_priv.h"
static bool dsa_slave_dev_check(struct net_device *dev);
-static int dsa_slave_notify(struct net_device *dev, unsigned long e, void *v)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct raw_notifier_head *nh = &p->dp->ds->dst->nh;
- int err;
-
- err = raw_notifier_call_chain(nh, e, v);
-
- return notifier_to_errno(err);
-}
-
/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
@@ -86,33 +74,6 @@ static inline bool dsa_port_is_bridged(struct dsa_port *dp)
return !!dp->bridge_dev;
}
-static void dsa_slave_set_state(struct net_device *dev, u8 state)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_port *dp = p->dp;
- struct dsa_switch *ds = dp->ds;
- int port = dp->index;
-
- if (ds->ops->port_stp_state_set)
- ds->ops->port_stp_state_set(ds, port, state);
-
- if (ds->ops->port_fast_age) {
- /* Fast age FDB entries or flush appropriate forwarding database
- * for the given port, if we are moving it from Learning or
- * Forwarding state, to Disabled or Blocking or Listening state.
- */
-
- if ((dp->stp_state == BR_STATE_LEARNING ||
- dp->stp_state == BR_STATE_FORWARDING) &&
- (state == BR_STATE_DISABLED ||
- state == BR_STATE_BLOCKING ||
- state == BR_STATE_LISTENING))
- ds->ops->port_fast_age(ds, port);
- }
-
- dp->stp_state = state;
-}
-
static int dsa_slave_open(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
@@ -148,7 +109,7 @@ static int dsa_slave_open(struct net_device *dev)
goto clear_promisc;
}
- dsa_slave_set_state(dev, stp_state);
+ dsa_port_set_state_now(p->dp, stp_state);
if (p->phy)
phy_start(p->phy);
@@ -190,7 +151,7 @@ static int dsa_slave_close(struct net_device *dev)
if (ds->ops->port_disable)
ds->ops->port_disable(ds, p->dp->index, p->phy);
- dsa_slave_set_state(dev, BR_STATE_DISABLED);
+ dsa_port_set_state_now(p->dp, BR_STATE_DISABLED);
return 0;
}
@@ -243,140 +204,6 @@ out:
return 0;
}
-static int dsa_slave_port_vlan_add(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_port *dp = p->dp;
- struct dsa_switch *ds = dp->ds;
-
- if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
- return -EOPNOTSUPP;
-
- return ds->ops->port_vlan_prepare(ds, dp->index, vlan, trans);
- }
-
- ds->ops->port_vlan_add(ds, dp->index, vlan, trans);
-
- return 0;
-}
-
-static int dsa_slave_port_vlan_del(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (!ds->ops->port_vlan_del)
- return -EOPNOTSUPP;
-
- return ds->ops->port_vlan_del(ds, p->dp->index, vlan);
-}
-
-static int dsa_slave_port_vlan_dump(struct net_device *dev,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (ds->ops->port_vlan_dump)
- return ds->ops->port_vlan_dump(ds, p->dp->index, vlan, cb);
-
- return -EOPNOTSUPP;
-}
-
-static int dsa_slave_port_fdb_add(struct net_device *dev,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add)
- return -EOPNOTSUPP;
-
- return ds->ops->port_fdb_prepare(ds, p->dp->index, fdb, trans);
- }
-
- ds->ops->port_fdb_add(ds, p->dp->index, fdb, trans);
-
- return 0;
-}
-
-static int dsa_slave_port_fdb_del(struct net_device *dev,
- const struct switchdev_obj_port_fdb *fdb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- int ret = -EOPNOTSUPP;
-
- if (ds->ops->port_fdb_del)
- ret = ds->ops->port_fdb_del(ds, p->dp->index, fdb);
-
- return ret;
-}
-
-static int dsa_slave_port_fdb_dump(struct net_device *dev,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (ds->ops->port_fdb_dump)
- return ds->ops->port_fdb_dump(ds, p->dp->index, fdb, cb);
-
- return -EOPNOTSUPP;
-}
-
-static int dsa_slave_port_mdb_add(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (switchdev_trans_ph_prepare(trans)) {
- if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
- return -EOPNOTSUPP;
-
- return ds->ops->port_mdb_prepare(ds, p->dp->index, mdb, trans);
- }
-
- ds->ops->port_mdb_add(ds, p->dp->index, mdb, trans);
-
- return 0;
-}
-
-static int dsa_slave_port_mdb_del(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (ds->ops->port_mdb_del)
- return ds->ops->port_mdb_del(ds, p->dp->index, mdb);
-
- return -EOPNOTSUPP;
-}
-
-static int dsa_slave_port_mdb_dump(struct net_device *dev,
- struct switchdev_obj_port_mdb *mdb,
- switchdev_obj_dump_cb_t *cb)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (ds->ops->port_mdb_dump)
- return ds->ops->port_mdb_dump(ds, p->dp->index, mdb, cb);
-
- return -EOPNOTSUPP;
-}
-
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct dsa_slave_priv *p = netdev_priv(dev);
@@ -387,96 +214,24 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-static int dsa_slave_stp_state_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- if (switchdev_trans_ph_prepare(trans))
- return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
-
- dsa_slave_set_state(dev, attr->u.stp_state);
-
- return 0;
-}
-
-static int dsa_slave_vlan_filtering(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
-
- /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- if (ds->ops->port_vlan_filtering)
- return ds->ops->port_vlan_filtering(ds, p->dp->index,
- attr->u.vlan_filtering);
-
- return 0;
-}
-
-static unsigned int dsa_fastest_ageing_time(struct dsa_switch *ds,
- unsigned int ageing_time)
-{
- int i;
-
- for (i = 0; i < ds->num_ports; ++i) {
- struct dsa_port *dp = &ds->ports[i];
-
- if (dp && dp->ageing_time && dp->ageing_time < ageing_time)
- ageing_time = dp->ageing_time;
- }
-
- return ageing_time;
-}
-
-static int dsa_slave_ageing_time(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time);
- unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
-
- if (switchdev_trans_ph_prepare(trans)) {
- if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
- return -ERANGE;
- if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
- return -ERANGE;
- return 0;
- }
-
- /* Keep the fastest ageing time in case of multiple bridges */
- p->dp->ageing_time = ageing_time;
- ageing_time = dsa_fastest_ageing_time(ds, ageing_time);
-
- if (ds->ops->set_ageing_time)
- return ds->ops->set_ageing_time(ds, ageing_time);
-
- return 0;
-}
-
static int dsa_slave_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int ret;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ret = dsa_slave_stp_state_set(dev, attr, trans);
+ ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ret = dsa_slave_vlan_filtering(dev, attr, trans);
+ ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
+ trans);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- ret = dsa_slave_ageing_time(dev, attr, trans);
+ ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
break;
default:
ret = -EOPNOTSUPP;
@@ -490,6 +245,8 @@ static int dsa_slave_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int err;
 /* For the prepare phase, ensure the full set of changes is feasible in
@@ -499,18 +256,14 @@ static int dsa_slave_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = dsa_slave_port_fdb_add(dev,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- trans);
+ err = dsa_port_fdb_add(dp, SWITCHDEV_OBJ_PORT_FDB(obj), trans);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = dsa_slave_port_mdb_add(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = dsa_slave_port_vlan_add(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
break;
default:
err = -EOPNOTSUPP;
@@ -523,19 +276,19 @@ static int dsa_slave_port_obj_add(struct net_device *dev,
static int dsa_slave_port_obj_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = dsa_slave_port_fdb_del(dev,
- SWITCHDEV_OBJ_PORT_FDB(obj));
+ err = dsa_port_fdb_del(dp, SWITCHDEV_OBJ_PORT_FDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = dsa_slave_port_mdb_del(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = dsa_slave_port_vlan_del(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
+ err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -549,22 +302,19 @@ static int dsa_slave_port_obj_dump(struct net_device *dev,
struct switchdev_obj *obj,
switchdev_obj_dump_cb_t *cb)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = dsa_slave_port_fdb_dump(dev,
- SWITCHDEV_OBJ_PORT_FDB(obj),
- cb);
+ err = dsa_port_fdb_dump(dp, SWITCHDEV_OBJ_PORT_FDB(obj), cb);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = dsa_slave_port_mdb_dump(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- cb);
+ err = dsa_port_mdb_dump(dp, SWITCHDEV_OBJ_PORT_MDB(obj), cb);
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = dsa_slave_port_vlan_dump(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- cb);
+ err = dsa_port_vlan_dump(dp, SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
break;
default:
err = -EOPNOTSUPP;
@@ -574,57 +324,6 @@ static int dsa_slave_port_obj_dump(struct net_device *dev,
return err;
}
-static int dsa_slave_bridge_port_join(struct net_device *dev,
- struct net_device *br)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_notifier_bridge_info info = {
- .sw_index = p->dp->ds->index,
- .port = p->dp->index,
- .br = br,
- };
- int err;
-
- /* Here the port is already bridged. Reflect the current configuration
- * so that drivers can program their chips accordingly.
- */
- p->dp->bridge_dev = br;
-
- err = dsa_slave_notify(dev, DSA_NOTIFIER_BRIDGE_JOIN, &info);
-
- /* The bridging is rolled back on error */
- if (err)
- p->dp->bridge_dev = NULL;
-
- return err;
-}
-
-static void dsa_slave_bridge_port_leave(struct net_device *dev,
- struct net_device *br)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_notifier_bridge_info info = {
- .sw_index = p->dp->ds->index,
- .port = p->dp->index,
- .br = br,
- };
- int err;
-
- /* Here the port is already unbridged. Reflect the current configuration
- * so that drivers can program their chips accordingly.
- */
- p->dp->bridge_dev = NULL;
-
- err = dsa_slave_notify(dev, DSA_NOTIFIER_BRIDGE_LEAVE, &info);
- if (err)
- netdev_err(dev, "failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
-
- /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
- * so allow it to be in BR_STATE_FORWARDING to be kept functional
- */
- dsa_slave_set_state(dev, BR_STATE_FORWARDING);
-}
-
static int dsa_slave_port_attr_get(struct net_device *dev,
struct switchdev_attr *attr)
{
@@ -821,8 +520,8 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
uint64_t *data)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->cpu_switch;
- s8 cpu_port = dst->cpu_port;
+ struct dsa_switch *ds = dst->cpu_dp->ds;
+ s8 cpu_port = dst->cpu_dp->index;
int count = 0;
if (dst->master_ethtool_ops.get_sset_count) {
@@ -838,7 +537,7 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->cpu_switch;
+ struct dsa_switch *ds = dst->cpu_dp->ds;
int count = 0;
if (dst->master_ethtool_ops.get_sset_count)
@@ -854,8 +553,8 @@ static void dsa_cpu_port_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->cpu_switch;
- s8 cpu_port = dst->cpu_port;
+ struct dsa_switch *ds = dst->cpu_dp->ds;
+ s8 cpu_port = dst->cpu_dp->index;
int len = ETH_GSTRING_LEN;
int mcount = 0, count;
unsigned int i;
@@ -1528,14 +1227,16 @@ static bool dsa_slave_dev_check(struct net_device *dev)
static int dsa_slave_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = p->dp;
int err = NOTIFY_DONE;
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking) {
- err = dsa_slave_bridge_port_join(dev, info->upper_dev);
+ err = dsa_port_bridge_join(dp, info->upper_dev);
err = notifier_from_errno(err);
} else {
- dsa_slave_bridge_port_leave(dev, info->upper_dev);
+ dsa_port_bridge_leave(dp, info->upper_dev);
err = NOTIFY_OK;
}
}
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index ca6e26e514f0..d8e5c311ee7c 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -12,7 +12,47 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
-#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "dsa_priv.h"
+
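+/* The ageing time is a property of the whole switch, so program the
+ * fastest time requested by any of its ports.
+ */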
+static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ int i;
+
+ for (i = 0; i < ds->num_ports; ++i) {
+ struct dsa_port *dp = &ds->ports[i];
+
+ if (dp->ageing_time && dp->ageing_time < ageing_time)
+ ageing_time = dp->ageing_time;
+ }
+
+ return ageing_time;
+}
+
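+/* Check the requested ageing time against the switch bounds in the prepare
+ * phase, and commit the fastest time of all ports otherwise.
+ */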
+static int dsa_switch_ageing_time(struct dsa_switch *ds,
+ struct dsa_notifier_ageing_time_info *info)
+{
+ unsigned int ageing_time = info->ageing_time;
+ struct switchdev_trans *trans = info->trans;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
+ return -ERANGE;
+ if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
+ return -ERANGE;
+ return 0;
+ }
+
+ /* Program the fastest ageing time in case of multiple bridges */
+ ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
+
+ if (ds->ops->set_ageing_time)
+ return ds->ops->set_ageing_time(ds, ageing_time);
+
+ return 0;
+}
static int dsa_switch_bridge_join(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
@@ -40,6 +80,117 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
return 0;
}
+static int dsa_switch_fdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_fdb_info *info)
+{
+ const struct switchdev_obj_port_fdb *fdb = info->fdb;
+ struct switchdev_trans *trans = info->trans;
+
+ /* Do not care yet about other switch chips of the fabric */
+ if (ds->index != info->sw_index)
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_fdb_prepare(ds, info->port, fdb, trans);
+ }
+
+ ds->ops->port_fdb_add(ds, info->port, fdb, trans);
+
+ return 0;
+}
+
+static int dsa_switch_fdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_fdb_info *info)
+{
+ const struct switchdev_obj_port_fdb *fdb = info->fdb;
+
+ /* Do not care yet about other switch chips of the fabric */
+ if (ds->index != info->sw_index)
+ return 0;
+
+ if (!ds->ops->port_fdb_del)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_fdb_del(ds, info->port, fdb);
+}
+
+static int dsa_switch_mdb_add(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
+{
+ const struct switchdev_obj_port_mdb *mdb = info->mdb;
+ struct switchdev_trans *trans = info->trans;
+
+ /* Do not care yet about other switch chips of the fabric */
+ if (ds->index != info->sw_index)
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_mdb_prepare(ds, info->port, mdb, trans);
+ }
+
+ ds->ops->port_mdb_add(ds, info->port, mdb, trans);
+
+ return 0;
+}
+
+static int dsa_switch_mdb_del(struct dsa_switch *ds,
+ struct dsa_notifier_mdb_info *info)
+{
+ const struct switchdev_obj_port_mdb *mdb = info->mdb;
+
+ /* Do not care yet about other switch chips of the fabric */
+ if (ds->index != info->sw_index)
+ return 0;
+
+ if (!ds->ops->port_mdb_del)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_mdb_del(ds, info->port, mdb);
+}
+
+static int dsa_switch_vlan_add(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ const struct switchdev_obj_port_vlan *vlan = info->vlan;
+ struct switchdev_trans *trans = info->trans;
+
+ /* Do not care yet about other switch chips of the fabric */
+ if (ds->index != info->sw_index)
+ return 0;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_vlan_prepare(ds, info->port, vlan, trans);
+ }
+
+ ds->ops->port_vlan_add(ds, info->port, vlan, trans);
+
+ return 0;
+}
+
+static int dsa_switch_vlan_del(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ const struct switchdev_obj_port_vlan *vlan = info->vlan;
+
+ /* Do not care yet about other switch chips of the fabric */
+ if (ds->index != info->sw_index)
+ return 0;
+
+ if (!ds->ops->port_vlan_del)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_vlan_del(ds, info->port, vlan);
+}
+
static int dsa_switch_event(struct notifier_block *nb,
unsigned long event, void *info)
{
@@ -47,12 +198,33 @@ static int dsa_switch_event(struct notifier_block *nb,
int err;
switch (event) {
+ case DSA_NOTIFIER_AGEING_TIME:
+ err = dsa_switch_ageing_time(ds, info);
+ break;
case DSA_NOTIFIER_BRIDGE_JOIN:
err = dsa_switch_bridge_join(ds, info);
break;
case DSA_NOTIFIER_BRIDGE_LEAVE:
err = dsa_switch_bridge_leave(ds, info);
break;
+ case DSA_NOTIFIER_FDB_ADD:
+ err = dsa_switch_fdb_add(ds, info);
+ break;
+ case DSA_NOTIFIER_FDB_DEL:
+ err = dsa_switch_fdb_del(ds, info);
+ break;
+ case DSA_NOTIFIER_MDB_ADD:
+ err = dsa_switch_mdb_add(ds, info);
+ break;
+ case DSA_NOTIFIER_MDB_DEL:
+ err = dsa_switch_mdb_del(ds, info);
+ break;
+ case DSA_NOTIFIER_VLAN_ADD:
+ err = dsa_switch_vlan_add(ds, info);
+ break;
+ case DSA_NOTIFIER_VLAN_DEL:
+ err = dsa_switch_vlan_del(ds, info);
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 2a9b52c5af86..9f204f18ada3 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -12,7 +12,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
/* This tag length is 4 bytes, older ones were 6 bytes, we do not
@@ -101,7 +101,7 @@ static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
int source_port;
u8 *brcm_tag;
- ds = dst->cpu_switch;
+ ds = dst->cpu_dp->ds;
if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN)))
goto out_drop;
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 1c6633f0de01..3b62a57956a3 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -11,7 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
#define DSA_HLEN 4
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index d9c668aa5e54..f95cafd05702 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -11,7 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
#define DSA_HLEN 4
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index 70130ed5c21a..afd59330b5f1 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -14,7 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
/* To define the outgoing port and to discover the incoming port a regular
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 837cdddb53f0..d1258e84cd71 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -13,7 +13,7 @@
*/
#include <linux/etherdevice.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
#define MTK_HDR_LEN 4
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 3ba3f59f7a34..2451007699b7 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -12,7 +12,7 @@
*/
#include <linux/etherdevice.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
#define QCA_HDR_LEN 2
@@ -99,7 +99,7 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
/* This protocol doesn't support cascading multiple switches so it's
* safe to assume the switch is first in the tree
*/
- ds = dst->cpu_switch;
+ ds = dst->cpu_dp->ds;
if (!ds)
goto out_drop;
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index aafc2fc74c30..7488ab2932ab 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -11,7 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <net/dsa.h>
+
#include "dsa_priv.h"
static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -67,7 +67,7 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
u8 *trailer;
int source_port;
- ds = dst->cpu_switch;
+ ds = dst->cpu_dp->ds;
if (skb_linearize(skb))
goto out_drop;
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index eedba7670b51..a60658c85a9a 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -301,15 +301,14 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
goto out_skb;
skb->dev = dev;
- skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
- dev_put(dev);
-
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
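+	/* keep the device referenced until dev_queue_xmit() is done with skb->dev */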
+ dev_put(dev);
+
return err ?: size;
out_skb:
@@ -690,15 +689,14 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
goto out_skb;
skb->dev = dev;
- skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
- dev_put(dev);
-
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
+ dev_put(dev);
+
return err ?: size;
out_skb:
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 83e3ed258467..14d2f7bd7c76 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -594,7 +594,8 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
} else {
tb = fib_new_table(net, cfg.fc_table);
if (tb)
- err = fib_table_insert(net, tb, &cfg);
+ err = fib_table_insert(net, tb,
+ &cfg, NULL);
else
err = -ENOBUFS;
}
@@ -626,14 +627,15 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct fib_config *cfg)
+ struct nlmsghdr *nlh, struct fib_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct nlattr *attr;
int err, remaining;
struct rtmsg *rtm;
err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy,
- NULL);
+ extack);
if (err < 0)
goto errout;
@@ -654,6 +656,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
cfg->fc_nlinfo.nl_net = net;
if (cfg->fc_type > RTN_MAX) {
+ NL_SET_ERR_MSG(extack, "Invalid route type");
err = -EINVAL;
goto errout;
}
@@ -718,12 +721,13 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct fib_table *tb;
int err;
- err = rtm_to_fib_config(net, skb, nlh, &cfg);
+ err = rtm_to_fib_config(net, skb, nlh, &cfg, extack);
if (err < 0)
goto errout;
tb = fib_get_table(net, cfg.fc_table);
if (!tb) {
+ NL_SET_ERR_MSG(extack, "FIB table does not exist");
err = -ESRCH;
goto errout;
}
@@ -741,7 +745,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct fib_table *tb;
int err;
- err = rtm_to_fib_config(net, skb, nlh, &cfg);
+ err = rtm_to_fib_config(net, skb, nlh, &cfg, extack);
if (err < 0)
goto errout;
@@ -751,7 +755,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout;
}
- err = fib_table_insert(net, tb, &cfg);
+ err = fib_table_insert(net, tb, &cfg, extack);
errout:
return err;
}
@@ -845,7 +849,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
cfg.fc_scope = RT_SCOPE_HOST;
if (cmd == RTM_NEWROUTE)
- fib_table_insert(net, tb, &cfg);
+ fib_table_insert(net, tb, &cfg, NULL);
else
fib_table_delete(net, tb, &cfg);
}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index 9c02920725db..2704e08545da 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -28,7 +28,8 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
/* Exported by fib_semantics.c */
void fib_release_info(struct fib_info *);
-struct fib_info *fib_create_info(struct fib_config *cfg);
+struct fib_info *fib_create_info(struct fib_config *cfg,
+ struct netlink_ext_ack *extack);
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ad9ad4aab5da..de9484658232 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -32,6 +32,7 @@
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/netlink.h>
#include <net/arp.h>
#include <net/ip.h>
@@ -456,7 +457,8 @@ static int fib_detect_death(struct fib_info *fi, int order,
#ifdef CONFIG_IP_ROUTE_MULTIPATH
-static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
+static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
+ struct netlink_ext_ack *extack)
{
int nhs = 0;
@@ -466,22 +468,35 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
}
/* leftover implies invalid nexthop configuration, discard it */
- return remaining > 0 ? 0 : nhs;
+ if (remaining > 0) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid nexthop configuration - extra data after nexthops");
+ nhs = 0;
+ }
+
+ return nhs;
}
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
- int remaining, struct fib_config *cfg)
+ int remaining, struct fib_config *cfg,
+ struct netlink_ext_ack *extack)
{
int ret;
change_nexthops(fi) {
int attrlen;
- if (!rtnh_ok(rtnh, remaining))
+ if (!rtnh_ok(rtnh, remaining)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid nexthop configuration - extra data after nexthop");
return -EINVAL;
+ }
- if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid flags for nexthop - can not contain DEAD or LINKDOWN");
return -EINVAL;
+ }
nexthop_nh->nh_flags =
(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
@@ -507,8 +522,12 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
nla_entype = nla_find(attrs, attrlen,
RTA_ENCAP_TYPE);
- if (!nla_entype)
+ if (!nla_entype) {
+ NL_SET_BAD_ATTR(extack, nla);
+ NL_SET_ERR_MSG(extack,
+ "Encap type is missing");
goto err_inval;
+ }
ret = lwtunnel_build_state(nla_get_u16(
nla_entype),
@@ -716,7 +735,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
* |-> {local prefix} (terminal node)
*/
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
- struct fib_nh *nh)
+ struct fib_nh *nh, struct netlink_ext_ack *extack)
{
int err = 0;
struct net *net;
@@ -729,16 +748,25 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
if (nh->nh_flags & RTNH_F_ONLINK) {
unsigned int addr_type;
- if (cfg->fc_scope >= RT_SCOPE_LINK)
+ if (cfg->fc_scope >= RT_SCOPE_LINK) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop has invalid scope");
return -EINVAL;
+ }
dev = __dev_get_by_index(net, nh->nh_oif);
if (!dev)
return -ENODEV;
- if (!(dev->flags & IFF_UP))
+ if (!(dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop device is not up");
return -ENETDOWN;
+ }
addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
- if (addr_type != RTN_UNICAST)
+ if (addr_type != RTN_UNICAST) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop has invalid gateway");
return -EINVAL;
+ }
if (!netif_carrier_ok(dev))
nh->nh_flags |= RTNH_F_LINKDOWN;
nh->nh_dev = dev;
@@ -778,18 +806,25 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
}
if (err) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop has invalid gateway");
rcu_read_unlock();
return err;
}
}
err = -EINVAL;
- if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
+ if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
+ NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
goto out;
+ }
nh->nh_scope = res.scope;
nh->nh_oif = FIB_RES_OIF(res);
nh->nh_dev = dev = FIB_RES_DEV(res);
- if (!dev)
+ if (!dev) {
+ NL_SET_ERR_MSG(extack,
+ "No egress device for nexthop gateway");
goto out;
+ }
dev_hold(dev);
if (!netif_carrier_ok(dev))
nh->nh_flags |= RTNH_F_LINKDOWN;
@@ -797,17 +832,21 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
} else {
struct in_device *in_dev;
- if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
+ if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
return -EINVAL;
-
+ }
rcu_read_lock();
err = -ENODEV;
in_dev = inetdev_by_index(net, nh->nh_oif);
if (!in_dev)
goto out;
err = -ENETDOWN;
- if (!(in_dev->dev->flags & IFF_UP))
+ if (!(in_dev->dev->flags & IFF_UP)) {
+ NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
goto out;
+ }
nh->nh_dev = in_dev->dev;
dev_hold(nh->nh_dev);
nh->nh_scope = RT_SCOPE_HOST;
@@ -982,7 +1021,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
return 0;
}
-struct fib_info *fib_create_info(struct fib_config *cfg)
+struct fib_info *fib_create_info(struct fib_config *cfg,
+ struct netlink_ext_ack *extack)
{
int err;
struct fib_info *fi = NULL;
@@ -994,15 +1034,20 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto err_inval;
/* Fast check to catch the most weird cases */
- if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
+ if (fib_props[cfg->fc_type].scope > cfg->fc_scope) {
+ NL_SET_ERR_MSG(extack, "Invalid scope");
goto err_inval;
+ }
- if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+ if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid rtm_flags - can not contain DEAD or LINKDOWN");
goto err_inval;
+ }
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (cfg->fc_mp) {
- nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
+ nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len, extack);
if (nhs == 0)
goto err_inval;
}
@@ -1065,18 +1110,29 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
+ err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack);
if (err != 0)
goto failure;
- if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
+ if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop device index does not match RTA_OIF");
goto err_inval;
- if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
+ }
+ if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop gateway does not match RTA_GATEWAY");
goto err_inval;
+ }
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
+ if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop class id does not match RTA_FLOW");
goto err_inval;
+ }
#endif
#else
+ NL_SET_ERR_MSG(extack,
+ "Multipath support not enabled in kernel");
goto err_inval;
#endif
} else {
@@ -1085,8 +1141,11 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (cfg->fc_encap) {
struct lwtunnel_state *lwtstate;
- if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
+ if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE) {
+ NL_SET_ERR_MSG(extack,
+ "LWT encap type not specified");
goto err_inval;
+ }
err = lwtunnel_build_state(cfg->fc_encap_type,
cfg->fc_encap, AF_INET, cfg,
&lwtstate);
@@ -1109,8 +1168,11 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
}
if (fib_props[cfg->fc_type].error) {
- if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
+ if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) {
+ NL_SET_ERR_MSG(extack,
+ "Gateway, device and multipath can not be specified for this route type");
goto err_inval;
+ }
goto link_it;
} else {
switch (cfg->fc_type) {
@@ -1121,19 +1183,30 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
case RTN_MULTICAST:
break;
default:
+ NL_SET_ERR_MSG(extack, "Invalid route type");
goto err_inval;
}
}
- if (cfg->fc_scope > RT_SCOPE_HOST)
+ if (cfg->fc_scope > RT_SCOPE_HOST) {
+ NL_SET_ERR_MSG(extack, "Invalid scope");
goto err_inval;
+ }
if (cfg->fc_scope == RT_SCOPE_HOST) {
struct fib_nh *nh = fi->fib_nh;
/* Local address is added. */
- if (nhs != 1 || nh->nh_gw)
+ if (nhs != 1) {
+ NL_SET_ERR_MSG(extack,
+ "Route with host scope can not have multiple nexthops");
+ goto err_inval;
+ }
+ if (nh->nh_gw) {
+ NL_SET_ERR_MSG(extack,
+ "Route with host scope can not have a gateway");
goto err_inval;
+ }
nh->nh_scope = RT_SCOPE_NOWHERE;
nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
err = -ENODEV;
@@ -1143,7 +1216,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
int linkdown = 0;
change_nexthops(fi) {
- err = fib_check_nh(cfg, fi, nexthop_nh);
+ err = fib_check_nh(cfg, fi, nexthop_nh, extack);
if (err != 0)
goto failure;
if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
@@ -1153,8 +1226,10 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi->fib_flags |= RTNH_F_LINKDOWN;
}
- if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
+ if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) {
+ NL_SET_ERR_MSG(extack, "Invalid prefsrc address");
goto err_inval;
+ }
change_nexthops(fi) {
fib_info_update_nh_saddr(net, nexthop_nh);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 51182ff2b441..6e9df7d9bcc2 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1101,7 +1101,7 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp,
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
- struct fib_config *cfg)
+ struct fib_config *cfg, struct netlink_ext_ack *extack)
{
enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
struct trie *t = (struct trie *)tb->tb_data;
@@ -1125,7 +1125,7 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
if ((plen < KEYLENGTH) && (key << plen))
return -EINVAL;
- fi = fib_create_info(cfg);
+ fi = fib_create_info(cfg, extack);
if (IS_ERR(fi)) {
err = PTR_ERR(fi);
goto err;
@@ -1452,6 +1452,7 @@ found:
if (!(fib_flags & FIB_LOOKUP_NOREF))
atomic_inc(&fi->fib_clntref);
+ res->prefix = htonl(n->key);
res->prefixlen = KEYLENGTH - fa->fa_slen;
res->nh_sel = nhsel;
res->type = fa->fa_type;
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 805f6607f8d9..8e0257d01200 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
+#include <net/fou.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
@@ -859,25 +860,6 @@ size_t gue_encap_hlen(struct ip_tunnel_encap *e)
}
EXPORT_SYMBOL(gue_encap_hlen);
-static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
- struct flowi4 *fl4, u8 *protocol, __be16 sport)
-{
- struct udphdr *uh;
-
- skb_push(skb, sizeof(struct udphdr));
- skb_reset_transport_header(skb);
-
- uh = udp_hdr(skb);
-
- uh->dest = e->dport;
- uh->source = sport;
- uh->len = htons(skb->len);
- udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
- fl4->saddr, fl4->daddr, skb->len);
-
- *protocol = IPPROTO_UDP;
-}
-
int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, __be16 *sport, int type)
{
@@ -894,24 +876,6 @@ int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
}
EXPORT_SYMBOL(__fou_build_header);
-int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4)
-{
- int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
- SKB_GSO_UDP_TUNNEL;
- __be16 sport;
- int err;
-
- err = __fou_build_header(skb, e, protocol, &sport, type);
- if (err)
- return err;
-
- fou_build_udp(skb, e, fl4, protocol, sport);
-
- return 0;
-}
-EXPORT_SYMBOL(fou_build_header);
-
int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, __be16 *sport, int type)
{
@@ -985,8 +949,46 @@ int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
}
EXPORT_SYMBOL(__gue_build_header);
-int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4)
+#ifdef CONFIG_NET_FOU_IP_TUNNELS
+
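+/* The header builders below are only referenced by the IP tunnel encap ops
+ * registered at the end of this block, hence static and guarded by
+ * CONFIG_NET_FOU_IP_TUNNELS.
+ */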
+static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ struct flowi4 *fl4, u8 *protocol, __be16 sport)
+{
+ struct udphdr *uh;
+
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+
+ uh = udp_hdr(skb);
+
+ uh->dest = e->dport;
+ uh->source = sport;
+ uh->len = htons(skb->len);
+ udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
+ fl4->saddr, fl4->daddr, skb->len);
+
+ *protocol = IPPROTO_UDP;
+}
+
+static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4)
+{
+ int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
+ SKB_GSO_UDP_TUNNEL;
+ __be16 sport;
+ int err;
+
+ err = __fou_build_header(skb, e, protocol, &sport, type);
+ if (err)
+ return err;
+
+ fou_build_udp(skb, e, fl4, protocol, sport);
+
+ return 0;
+}
+
+static int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4)
{
int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
SKB_GSO_UDP_TUNNEL;
@@ -1001,9 +1003,7 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
return 0;
}
-EXPORT_SYMBOL(gue_build_header);
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
static const struct ip_tunnel_encap_ops fou_iptun_ops = {
.encap_hlen = fou_encap_hlen,
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 43318b5f5647..5610971bf859 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -489,7 +489,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
- rt = __ip_route_output_key_hash(net, fl4, skb_in);
+ rt = ip_route_output_key_hash(net, fl4, skb_in);
if (IS_ERR(rt))
return rt;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1054d330bf9d..82dec8825d28 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -25,6 +25,7 @@
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
+#include <net/addrconf.h>
#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 7cd8d0d918f8..6f8d9e5e062b 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -172,7 +172,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
struct iphdr *iph = ip_hdr(skb_in);
u8 proto;
- if (skb_in->csum_bad || iph->frag_off & htons(IP_OFFSET))
+ if (iph->frag_off & htons(IP_OFFSET))
return;
if (skb_csum_unnecessary(skb_in)) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6883b3d4ba8f..f1f2e5aaa2d6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -114,6 +114,8 @@
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
+#include "fib_lookup.h"
+
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -1860,9 +1862,9 @@ static int ip_mkroute_input(struct sk_buff *skb,
*/
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+ u8 tos, struct net_device *dev,
+ struct fib_result *res)
{
- struct fib_result res;
struct in_device *in_dev = __in_dev_get_rcu(dev);
struct ip_tunnel_info *tun_info;
struct flowi4 fl4;
@@ -1892,8 +1894,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
- res.fi = NULL;
- res.table = NULL;
+ res->fi = NULL;
+ res->table = NULL;
if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
goto brd_input;
@@ -1929,17 +1931,17 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.daddr = daddr;
fl4.saddr = saddr;
fl4.flowi4_uid = sock_net_uid(net, NULL);
- err = fib_lookup(net, &fl4, &res, 0);
+ err = fib_lookup(net, &fl4, res, 0);
if (err != 0) {
if (!IN_DEV_FORWARD(in_dev))
err = -EHOSTUNREACH;
goto no_route;
}
- if (res.type == RTN_BROADCAST)
+ if (res->type == RTN_BROADCAST)
goto brd_input;
- if (res.type == RTN_LOCAL) {
+ if (res->type == RTN_LOCAL) {
err = fib_validate_source(skb, saddr, daddr, tos,
0, dev, in_dev, &itag);
if (err < 0)
@@ -1951,10 +1953,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
err = -EHOSTUNREACH;
goto no_route;
}
- if (res.type != RTN_UNICAST)
+ if (res->type != RTN_UNICAST)
goto martian_destination;
- err = ip_mkroute_input(skb, &res, in_dev, daddr, saddr, tos);
+ err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos);
out: return err;
brd_input:
@@ -1968,14 +1970,14 @@ brd_input:
goto martian_source;
}
flags |= RTCF_BROADCAST;
- res.type = RTN_BROADCAST;
+ res->type = RTN_BROADCAST;
RT_CACHE_STAT_INC(in_brd);
local_input:
do_cache = false;
- if (res.fi) {
+ if (res->fi) {
if (!itag) {
- rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
+ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
err = 0;
@@ -1986,7 +1988,7 @@ local_input:
}
rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
- flags | RTCF_LOCAL, res.type,
+ flags | RTCF_LOCAL, res->type,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
if (!rth)
goto e_nobufs;
@@ -1996,18 +1998,18 @@ local_input:
rth->dst.tclassid = itag;
#endif
rth->rt_is_input = 1;
- if (res.table)
- rth->rt_table_id = res.table->tb_id;
+ if (res->table)
+ rth->rt_table_id = res->table->tb_id;
RT_CACHE_STAT_INC(in_slow_tot);
- if (res.type == RTN_UNREACHABLE) {
+ if (res->type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
rth->dst.error= -err;
rth->rt_flags &= ~RTCF_LOCAL;
}
if (do_cache) {
- struct fib_nh *nh = &FIB_RES_NH(res);
+ struct fib_nh *nh = &FIB_RES_NH(*res);
rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
@@ -2027,9 +2029,9 @@ local_input:
no_route:
RT_CACHE_STAT_INC(in_no_route);
- res.type = RTN_UNREACHABLE;
- res.fi = NULL;
- res.table = NULL;
+ res->type = RTN_UNREACHABLE;
+ res->fi = NULL;
+ res->table = NULL;
goto local_input;
/*
@@ -2059,11 +2061,22 @@ martian_source:
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
- int res;
+ struct fib_result res;
+ int err;
tos &= IPTOS_RT_MASK;
rcu_read_lock();
+ err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
+ rcu_read_unlock();
+ return err;
+}
+EXPORT_SYMBOL(ip_route_input_noref);
+
+/* called with rcu_read_lock held */
+int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev, struct fib_result *res)
+{
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
hardware multicast filters :-( As result the host on multicasting
@@ -2078,6 +2091,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (ipv4_is_multicast(daddr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
int our = 0;
+ int err = -EINVAL;
if (in_dev)
our = ip_check_mc_rcu(in_dev, daddr, saddr,
@@ -2093,7 +2107,6 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
ip_hdr(skb)->protocol);
}
- res = -EINVAL;
if (our
#ifdef CONFIG_IP_MROUTE
||
@@ -2101,17 +2114,14 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
IN_DEV_MFORWARD(in_dev))
#endif
) {
- res = ip_route_input_mc(skb, daddr, saddr,
+ err = ip_route_input_mc(skb, daddr, saddr,
tos, dev, our);
}
- rcu_read_unlock();
- return res;
+ return err;
}
- res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
- rcu_read_unlock();
- return res;
+
+ return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
-EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
@@ -2254,29 +2264,40 @@ add:
* Major route resolver routine.
*/
-struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
- const struct sk_buff *skb)
+struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
+ const struct sk_buff *skb)
{
- struct net_device *dev_out = NULL;
__u8 tos = RT_FL_TOS(fl4);
- unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
- int orig_oif;
- int err = -ENETUNREACH;
res.tclassid = 0;
res.fi = NULL;
res.table = NULL;
- orig_oif = fl4->flowi4_oif;
-
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
rcu_read_lock();
+ rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
+ rcu_read_unlock();
+
+ return rth;
+}
+EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
+
+struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
+ struct fib_result *res,
+ const struct sk_buff *skb)
+{
+ struct net_device *dev_out = NULL;
+ int orig_oif = fl4->flowi4_oif;
+ unsigned int flags = 0;
+ struct rtable *rth;
+ int err = -ENETUNREACH;
+
if (fl4->saddr) {
rth = ERR_PTR(-EINVAL);
if (ipv4_is_multicast(fl4->saddr) ||
@@ -2362,15 +2383,15 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
dev_out = net->loopback_dev;
fl4->flowi4_oif = LOOPBACK_IFINDEX;
- res.type = RTN_LOCAL;
+ res->type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
}
- err = fib_lookup(net, fl4, &res, 0);
+ err = fib_lookup(net, fl4, res, 0);
if (err) {
- res.fi = NULL;
- res.table = NULL;
+ res->fi = NULL;
+ res->table = NULL;
if (fl4->flowi4_oif &&
(ipv4_is_multicast(fl4->daddr) ||
!netif_index_is_l3_master(net, fl4->flowi4_oif))) {
@@ -2395,43 +2416,41 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
if (fl4->saddr == 0)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
- res.type = RTN_UNICAST;
+ res->type = RTN_UNICAST;
goto make_route;
}
rth = ERR_PTR(err);
goto out;
}
- if (res.type == RTN_LOCAL) {
+ if (res->type == RTN_LOCAL) {
if (!fl4->saddr) {
- if (res.fi->fib_prefsrc)
- fl4->saddr = res.fi->fib_prefsrc;
+ if (res->fi->fib_prefsrc)
+ fl4->saddr = res->fi->fib_prefsrc;
else
fl4->saddr = fl4->daddr;
}
/* L3 master device is the loopback for that domain */
- dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(res)) ? :
+ dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
net->loopback_dev;
fl4->flowi4_oif = dev_out->ifindex;
flags |= RTCF_LOCAL;
goto make_route;
}
- fib_select_path(net, &res, fl4, skb);
+ fib_select_path(net, res, fl4, skb);
- dev_out = FIB_RES_DEV(res);
+ dev_out = FIB_RES_DEV(*res);
fl4->flowi4_oif = dev_out->ifindex;
make_route:
- rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
+ rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
out:
- rcu_read_unlock();
return rth;
}
-EXPORT_SYMBOL_GPL(__ip_route_output_key_hash);
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
@@ -2525,18 +2544,18 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
+/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
- u32 seq, int event)
+ u32 seq, struct rtable *rt)
{
- struct rtable *rt = skb_rtable(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned long expires = 0;
u32 error;
u32 metrics[RTAX_MAX];
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), 0);
+ nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
if (!nlh)
return -EMSGSIZE;
@@ -2644,6 +2663,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct net *net = sock_net(in_skb->sk);
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
+ struct fib_result res = {};
struct rtable *rt = NULL;
struct flowi4 fl4;
__be32 dst = 0;
@@ -2700,10 +2720,12 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fl4.flowi4_mark = mark;
fl4.flowi4_uid = uid;
+ rcu_read_lock();
+
if (iif) {
struct net_device *dev;
- dev = __dev_get_by_index(net, iif);
+ dev = dev_get_by_index_rcu(net, iif);
if (!dev) {
err = -ENODEV;
goto errout_free;
@@ -2712,14 +2734,14 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
skb->protocol = htons(ETH_P_IP);
skb->dev = dev;
skb->mark = mark;
- err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
+ err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
+ dev, &res);
rt = skb_rtable(skb);
if (err == 0 && rt->dst.error)
err = -rt->dst.error;
} else {
- rt = ip_route_output_key(net, &fl4);
-
+ rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
err = 0;
if (IS_ERR(rt))
err = PTR_ERR(rt);
@@ -2728,24 +2750,32 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (err)
goto errout_free;
- skb_dst_set(skb, &rt->dst);
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
table_id = rt->rt_table_id;
- err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
- NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
- RTM_NEWROUTE);
+ if (rtm->rtm_flags & RTM_F_FIB_MATCH)
+ err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
+ rt->rt_type, res.prefix, res.prefixlen,
+ fl4.flowi4_tos, res.fi, 0);
+ else
+ err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
+ NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
+ rt);
if (err < 0)
goto errout_free;
+ rcu_read_unlock();
+
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
errout_free:
+ rcu_read_unlock();
kfree_skb(skb);
goto errout;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0257d965f111..6426250a58ea 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -66,10 +66,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
*/
-__u32 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req)
{
struct inet_request_sock *ireq;
- u32 ts, ts_now = tcp_time_stamp;
+ u32 ts, ts_now = tcp_time_stamp_raw();
u32 options = 0;
ireq = inet_rsk(req);
@@ -88,7 +88,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
ts <<= TSBITS;
ts |= options;
}
- return ts;
+ return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ);
}
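The final multiply converts the TCP_TS_HZ tick count into the microsecond clock now used for skb_mstamp. A minimal userspace sketch of the encode-and-clamp logic above, assuming TSBITS == 6 (syncookies.c) and TCP_TS_HZ == 1000 (tcp.h); all concrete values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TSBITS       6        /* low bits carry the encoded options (assumed) */
#define TCP_TS_HZ    1000     /* timestamp clock, ticks per second (assumed) */
#define USEC_PER_SEC 1000000

int main(void)
{
	uint32_t ts_now  = 0x12345678;  /* tcp_time_stamp_raw(), illustrative */
	uint32_t options = 0x2d;        /* wscale/SACK/ECN bits, < (1 << TSBITS) */

	/* Overwrite the low TSBITS with the option bits, then make sure the
	 * result did not move ahead of ts_now, as the comment above requires.
	 */
	uint32_t ts = (ts_now & ~((1u << TSBITS) - 1)) | options;
	if (ts > ts_now)
		ts -= 1u << TSBITS;

	/* Scale from TCP_TS_HZ ticks to the usec clock used by skb_mstamp. */
	uint64_t usec = (uint64_t)ts * (USEC_PER_SEC / TCP_TS_HZ);

	printf("ts=%u (<= %u), usec=%llu\n", ts, ts_now,
	       (unsigned long long)usec);
	return 0;
}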
@@ -343,7 +343,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
- treq->snt_synack.v64 = 0;
+ treq->snt_synack = 0;
treq->tfo_listener = false;
ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 59792d283ff8..f7be94fc8431 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,7 +386,7 @@ void tcp_init_sock(struct sock *sk)
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U);
+ minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -2186,7 +2186,7 @@ adjudge_to_death:
/* Now socket is owned by kernel and we acquire BH lock
- to finish close. No need to check for user refs.
+ * to finish close. No need to check for user refs.
*/
local_bh_disable();
bh_lock_sock(sk);
@@ -2478,7 +2478,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_MAXSEG:
/* Values greater than interface MTU won't take effect. However
* at the point when this call is done we typically don't yet
- * know which interface is going to be used */
+ * know which interface is going to be used
+ */
if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
err = -EINVAL;
break;
@@ -2713,7 +2714,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
if (!tp->repair)
err = -EPERM;
else
- tp->tsoffset = val - tcp_time_stamp;
+ tp->tsoffset = val - tcp_time_stamp_raw();
break;
case TCP_REPAIR_WINDOW:
err = tcp_repair_set_window(tp, optval, optlen);
@@ -2764,7 +2765,7 @@ static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
stats[i] = tp->chrono_stat[i - 1];
if (i == tp->chrono_type)
- stats[i] += tcp_time_stamp - tp->chrono_start;
+ stats[i] += tcp_jiffies32 - tp->chrono_start;
stats[i] *= USEC_PER_SEC / HZ;
total += stats[i];
}
@@ -2848,7 +2849,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_retrans = tp->retrans_out;
info->tcpi_fackets = tp->fackets_out;
- now = tcp_time_stamp;
+ now = tcp_jiffies32;
info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
@@ -3079,7 +3080,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
break;
case TCP_TIMESTAMP:
- val = tcp_time_stamp + tp->tsoffset;
+ val = tcp_time_stamp_raw() + tp->tsoffset;
break;
case TCP_NOTSENT_LOWAT:
val = tp->notsent_lowat;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index b89bce4c721e..dbcc9352a48f 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -52,10 +52,9 @@
* There is a public e-mail list for discussing BBR development and testing:
* https://groups.google.com/forum/#!forum/bbr-dev
*
- * NOTE: BBR *must* be used with the fq qdisc ("man tc-fq") with pacing enabled,
- * since pacing is integral to the BBR design and implementation.
- * BBR without pacing would not function properly, and may incur unnecessary
- * high packet loss rates.
+ * NOTE: BBR may be used with the fq qdisc ("man tc-fq") with pacing enabled;
+ * otherwise the TCP stack falls back to internal pacing, using one
+ * high-resolution timer per TCP socket, which may use more resources.
*/
#include <linux/module.h>
#include <net/tcp.h>
@@ -92,7 +91,7 @@ struct bbr {
struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */
u32 rtt_cnt; /* count of packet-timed rounds elapsed */
u32 next_rtt_delivered; /* scb->tx.delivered at end of round */
- struct skb_mstamp cycle_mstamp; /* time of this cycle phase start */
+ u64 cycle_mstamp; /* time of this cycle phase start */
u32 mode:3, /* current bbr_mode in state machine */
prev_ca_state:3, /* CA state on previous ACK */
packet_conservation:1, /* use packet conservation? */
@@ -412,7 +411,7 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
bool is_full_length =
- skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
+ tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
bbr->min_rtt_us;
u32 inflight, bw;
@@ -498,7 +497,7 @@ static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
+ bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
bbr->lt_last_delivered = tp->delivered;
bbr->lt_last_lost = tp->lost;
bbr->lt_rtt_cnt = 0;
@@ -552,7 +551,7 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
struct bbr *bbr = inet_csk_ca(sk);
u32 lost, delivered;
u64 bw;
- s32 t;
+ u32 t;
if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
@@ -604,15 +603,15 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
return;
/* Find average delivery rate in this sampling interval. */
- t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
- if (t < 1)
- return; /* interval is less than one jiffy, so wait */
- t = jiffies_to_usecs(t);
- /* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
- if (t < 1) {
+ t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
+ if ((s32)t < 1)
+ return; /* interval is less than one ms, so wait */
+ /* Check if we can multiply without overflow */
+ if (t >= ~0U / USEC_PER_MSEC) {
bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */
return;
}
+ t *= USEC_PER_MSEC;
bw = (u64)delivered * BW_UNIT;
do_div(bw, t);
bbr_lt_bw_interval_done(sk, bw);
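The long-term rate above is delivered * BW_UNIT / t, with t in microseconds and BW_UNIT = 1 << 24 (BW_SCALE in tcp_bbr.c). A worked example with assumed numbers:

#include <stdint.h>
#include <stdio.h>

#define BW_SCALE 24
#define BW_UNIT  (1 << BW_SCALE)   /* rate unit: pkts per usec, << 24 */

int main(void)
{
	uint32_t delivered = 1000;                   /* pkts in the interval */
	uint32_t t_ms      = 20;                     /* since lt_last_stamp */
	uint64_t t_us      = (uint64_t)t_ms * 1000;  /* t *= USEC_PER_MSEC */

	uint64_t bw = (uint64_t)delivered * BW_UNIT / t_us;

	/* 1000 pkts / 20000 us = 0.05 pkts/us, i.e. 50000 pkts/sec */
	printf("bw = %llu (%.3f pkts/us)\n",
	       (unsigned long long)bw, (double)bw / BW_UNIT);
	return 0;
}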
@@ -731,12 +730,12 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
bool filter_expired;
/* Track min RTT seen in the min_rtt_win_sec filter window: */
- filter_expired = after(tcp_time_stamp,
+ filter_expired = after(tcp_jiffies32,
bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
if (rs->rtt_us >= 0 &&
(rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
bbr->min_rtt_us = rs->rtt_us;
- bbr->min_rtt_stamp = tcp_time_stamp;
+ bbr->min_rtt_stamp = tcp_jiffies32;
}
if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
@@ -755,7 +754,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
/* Maintain min packets in flight for max(200 ms, 1 round). */
if (!bbr->probe_rtt_done_stamp &&
tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
- bbr->probe_rtt_done_stamp = tcp_time_stamp +
+ bbr->probe_rtt_done_stamp = tcp_jiffies32 +
msecs_to_jiffies(bbr_probe_rtt_mode_ms);
bbr->probe_rtt_round_done = 0;
bbr->next_rtt_delivered = tp->delivered;
@@ -763,8 +762,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
if (bbr->round_start)
bbr->probe_rtt_round_done = 1;
if (bbr->probe_rtt_round_done &&
- after(tcp_time_stamp, bbr->probe_rtt_done_stamp)) {
- bbr->min_rtt_stamp = tcp_time_stamp;
+ after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
+ bbr->min_rtt_stamp = tcp_jiffies32;
bbr->restore_cwnd = 1; /* snap to prior_cwnd */
bbr_reset_mode(sk);
}
@@ -811,7 +810,7 @@ static void bbr_init(struct sock *sk)
bbr->probe_rtt_done_stamp = 0;
bbr->probe_rtt_round_done = 0;
bbr->min_rtt_us = tcp_min_rtt(tp);
- bbr->min_rtt_stamp = tcp_time_stamp;
+ bbr->min_rtt_stamp = tcp_jiffies32;
minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
@@ -826,10 +825,12 @@ static void bbr_init(struct sock *sk)
bbr->idle_restart = 0;
bbr->full_bw = 0;
bbr->full_bw_cnt = 0;
- bbr->cycle_mstamp.v64 = 0;
+ bbr->cycle_mstamp = 0;
bbr->cycle_idx = 0;
bbr_reset_lt_bw_sampling(sk);
bbr_reset_startup_mode(sk);
+
+ cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}
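The cmpxchg above upgrades sk_pacing_status from SK_PACING_NONE to SK_PACING_NEEDED without overwriting SK_PACING_FQ when the fq qdisc has already taken ownership of pacing. A userspace sketch of that compare-and-swap, with the enum assumed to mirror enum sk_pacing:

#include <stdatomic.h>
#include <stdio.h>

enum sk_pacing { SK_PACING_NONE, SK_PACING_NEEDED, SK_PACING_FQ };

static void request_pacing(_Atomic int *status)
{
	int expected = SK_PACING_NONE;
	/* Only NONE -> NEEDED; an existing SK_PACING_FQ is left untouched. */
	atomic_compare_exchange_strong(status, &expected, SK_PACING_NEEDED);
}

int main(void)
{
	_Atomic int a = SK_PACING_NONE, b = SK_PACING_FQ;

	request_pacing(&a);  /* upgraded to SK_PACING_NEEDED */
	request_pacing(&b);  /* stays SK_PACING_FQ */
	printf("a=%d b=%d\n", (int)a, (int)b);
	return 0;
}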
static u32 bbr_sndbuf_expand(struct sock *sk)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 36087bca9f48..609965f0e298 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -84,14 +84,14 @@ static void bictcp_init(struct sock *sk)
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
if (ca->last_cwnd == cwnd &&
- (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
+ (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
return;
ca->last_cwnd = cwnd;
- ca->last_time = tcp_time_stamp;
+ ca->last_time = tcp_jiffies32;
if (ca->epoch_start == 0) /* record the beginning of an epoch */
- ca->epoch_start = tcp_time_stamp;
+ ca->epoch_start = tcp_jiffies32;
/* start off normal */
if (cwnd <= low_window) {
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 0683ba447d77..57ae5b5ae643 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -155,7 +155,7 @@ static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
if (event == CA_EVENT_TX_START) {
struct bictcp *ca = inet_csk_ca(sk);
- u32 now = tcp_time_stamp;
+ u32 now = tcp_jiffies32;
s32 delta;
delta = now - tcp_sk(sk)->lsndtime;
@@ -231,21 +231,21 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
ca->ack_cnt += acked; /* count the number of ACKed packets */
if (ca->last_cwnd == cwnd &&
- (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
+ (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
return;
/* The CUBIC function can update ca->cnt at most once per jiffy.
* On all cwnd reduction events, ca->epoch_start is set to 0,
* which will force a recalculation of ca->cnt.
*/
- if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+ if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
goto tcp_friendliness;
ca->last_cwnd = cwnd;
- ca->last_time = tcp_time_stamp;
+ ca->last_time = tcp_jiffies32;
if (ca->epoch_start == 0) {
- ca->epoch_start = tcp_time_stamp; /* record beginning */
+ ca->epoch_start = tcp_jiffies32; /* record beginning */
ca->ack_cnt = acked; /* start counting */
ca->tcp_cwnd = cwnd; /* syn with cubic */
@@ -276,7 +276,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
* if the cwnd < 1 million packets !!!
*/
- t = (s32)(tcp_time_stamp - ca->epoch_start);
+ t = (s32)(tcp_jiffies32 - ca->epoch_start);
t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
t <<= BICTCP_HZ;
@@ -448,7 +448,7 @@ static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
return;
/* Discard delay samples right after fast recovery */
- if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
return;
delay = (sample->rtt_us << 3) / USEC_PER_MSEC;
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4a4d8e76738f..3eb78cde6ff0 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -104,7 +104,7 @@ static void measure_achieved_throughput(struct sock *sk,
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
struct htcp *ca = inet_csk_ca(sk);
- u32 now = tcp_time_stamp;
+ u32 now = tcp_jiffies32;
if (icsk->icsk_ca_state == TCP_CA_Open)
ca->pkts_acked = sample->pkts_acked;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 174d4376baa5..9f4380662196 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -112,6 +112,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -441,7 +442,7 @@ void tcp_init_buffer_space(struct sock *sk)
tcp_sndbuf_expand(sk);
tp->rcvq_space.space = tp->rcv_wnd;
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
tp->rcvq_space.time = tp->tcp_mstamp;
tp->rcvq_space.seq = tp->copied_seq;
@@ -463,7 +464,7 @@ void tcp_init_buffer_space(struct sock *sk)
tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
/* 5. Recalculate window clamp after socket hit its memory bounds. */
@@ -555,11 +556,11 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
u32 delta_us;
- if (tp->rcv_rtt_est.time.v64 == 0)
+ if (tp->rcv_rtt_est.time == 0)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
- delta_us = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcv_rtt_est.time);
+ delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
tcp_rcv_rtt_update(tp, delta_us, 1);
new_measure:
@@ -571,13 +572,15 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+
if (tp->rx_opt.rcv_tsecr &&
(TCP_SKB_CB(skb)->end_seq -
- TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
- tcp_rcv_rtt_update(tp,
- jiffies_to_usecs(tcp_time_stamp -
- tp->rx_opt.rcv_tsecr),
- 0);
+ TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
+ u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+ u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+ tcp_rcv_rtt_update(tp, delta_us, 0);
+ }
}
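The echoed timestamp is in TCP_TS_HZ ticks, so the delta is scaled by USEC_PER_SEC / TCP_TS_HZ before reaching the RTT estimator. A minimal check of that conversion (TCP_TS_HZ == 1000 assumed, values illustrative):

#include <stdint.h>
#include <stdio.h>

#define TCP_TS_HZ    1000
#define USEC_PER_SEC 1000000

int main(void)
{
	uint32_t ts_now    = 700050;  /* tcp_time_stamp(tp), illustrative */
	uint32_t rcv_tsecr = 700008;  /* timestamp echoed by the peer */

	uint32_t delta    = ts_now - rcv_tsecr;              /* 42 ticks */
	uint32_t delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);

	printf("RTT sample: %u ticks = %u us\n", delta, delta_us);
	return 0;
}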
/*
@@ -590,7 +593,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
int time;
int copied;
- time = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcvq_space.time);
+ time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
return;
@@ -672,7 +675,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
tcp_rcv_rtt_measure(tp);
- now = tcp_time_stamp;
+ now = tcp_jiffies32;
if (!icsk->icsk_ack.ato) {
/* The _first_ data packet received, initialize
@@ -885,6 +888,9 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
struct tcp_sock *tp = tcp_sk(sk);
int mib_idx;
+ if (WARN_ON_ONCE(metric < 0))
+ return;
+
if (metric > tp->reordering) {
tp->reordering = min(sysctl_tcp_max_reordering, metric);
@@ -1134,8 +1140,8 @@ struct tcp_sacktag_state {
* that was SACKed. RTO needs the earliest RTT to stay conservative,
* but congestion control should still get an accurate delay signal.
*/
- struct skb_mstamp first_sackt;
- struct skb_mstamp last_sackt;
+ u64 first_sackt;
+ u64 last_sackt;
struct rate_sample *rate;
int flag;
};
@@ -1200,7 +1206,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
int dup_sack, int pcount,
- const struct skb_mstamp *xmit_time)
+ u64 xmit_time)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1242,9 +1248,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
state->reord);
if (!after(end_seq, tp->high_seq))
state->flag |= FLAG_ORIG_SACK_ACKED;
- if (state->first_sackt.v64 == 0)
- state->first_sackt = *xmit_time;
- state->last_sackt = *xmit_time;
+ if (state->first_sackt == 0)
+ state->first_sackt = xmit_time;
+ state->last_sackt = xmit_time;
}
if (sacked & TCPCB_LOST) {
@@ -1304,7 +1310,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount,
- &skb->skb_mstamp);
+ skb->skb_mstamp);
tcp_rate_skb_delivered(sk, skb, state->rate);
if (skb == tp->lost_skb_hint)
@@ -1356,8 +1362,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tcp_advance_highest_sack(sk, skb);
tcp_skb_collapse_tstamp(prev, skb);
- if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
- TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;
+ if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
+ TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
@@ -1587,7 +1593,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->end_seq,
dup_sack,
tcp_skb_pcount(skb),
- &skb->skb_mstamp);
+ skb->skb_mstamp);
tcp_rate_skb_delivered(sk, skb, state->rate);
if (!before(TCP_SKB_CB(skb)->seq,
@@ -1954,7 +1960,7 @@ void tcp_enter_loss(struct sock *sk)
}
tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
tp->retrans_out = 0;
tp->lost_out = 0;
@@ -2383,7 +2389,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
tcp_ecn_withdraw_cwr(tp);
}
}
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
tp->undo_marker = 0;
}
@@ -2520,7 +2526,7 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
(tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
tp->snd_cwnd = tp->snd_ssthresh;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}
@@ -2590,7 +2596,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
tcp_mss_to_mtu(sk, tp->mss_cache) /
icsk->icsk_mtup.probe_size;
tp->snd_cwnd_cnt = 0;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
tp->snd_ssthresh = tcp_current_ssthresh(sk);
icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
@@ -2911,7 +2917,7 @@ static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
struct tcp_sock *tp = tcp_sk(sk);
u32 wlen = sysctl_tcp_min_rtt_wlen * HZ;
- minmax_running_min(&tp->rtt_min, wlen, tcp_time_stamp,
+ minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
rtt_us ? : jiffies_to_usecs(1));
}
@@ -2936,9 +2942,12 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
* See draft-ietf-tcplw-high-performance-00, section 3.3.
*/
if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
- flag & FLAG_ACKED)
- seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
- tp->rx_opt.rcv_tsecr);
+ flag & FLAG_ACKED) {
+ u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+ u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+ seq_rtt_us = ca_rtt_us = delta_us;
+ }
if (seq_rtt_us < 0)
return false;
@@ -2960,12 +2969,8 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
{
long rtt_us = -1L;
- if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) {
- struct skb_mstamp now;
-
- skb_mstamp_get(&now);
- rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
- }
+ if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
+ rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
}
@@ -2976,7 +2981,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
const struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
- tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
+ tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
}
/* Restart timer after forward progress on connection.
@@ -3001,14 +3006,14 @@ void tcp_rearm_rto(struct sock *sk)
if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
struct sk_buff *skb = tcp_write_queue_head(sk);
- const u32 rto_time_stamp =
- tcp_skb_timestamp(skb) + rto;
- s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
- /* delta may not be positive if the socket is locked
+ u64 rto_time_stamp = skb->skb_mstamp +
+ jiffies_to_usecs(rto);
+ s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+ /* delta_us may not be positive if the socket is locked
* when the retrans timer fires and is rescheduled.
*/
- if (delta > 0)
- rto = delta;
+ if (delta_us > 0)
+ rto = usecs_to_jiffies(delta_us);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
TCP_RTO_MAX);
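With skb->skb_mstamp now a u64 microsecond count, the remaining RTO falls out of plain integer arithmetic. A sketch with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t skb_mstamp = 5000000;  /* head skb sent at t = 5.0 s (us) */
	uint64_t tcp_mstamp = 5150000;  /* now: 150 ms later */
	uint64_t rto_us     = 200000;   /* jiffies_to_usecs(rto), 200 ms */

	int64_t delta_us = (int64_t)(skb_mstamp + rto_us - tcp_mstamp);

	if (delta_us > 0)               /* 50 ms of the RTO still remain */
		printf("rearm in %lld us\n", (long long)delta_us);
	else
		printf("RTO already expired; fire immediately\n");
	return 0;
}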
@@ -3060,9 +3065,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
struct tcp_sacktag_state *sack)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct skb_mstamp first_ackt, last_ackt;
+ u64 first_ackt, last_ackt;
struct tcp_sock *tp = tcp_sk(sk);
- struct skb_mstamp *now = &tp->tcp_mstamp;
u32 prior_sacked = tp->sacked_out;
u32 reord = tp->packets_out;
bool fully_acked = true;
@@ -3075,7 +3079,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
bool rtt_update;
int flag = 0;
- first_ackt.v64 = 0;
+ first_ackt = 0;
while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3106,8 +3110,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
flag |= FLAG_RETRANS_DATA_ACKED;
} else if (!(sacked & TCPCB_SACKED_ACKED)) {
last_ackt = skb->skb_mstamp;
- WARN_ON_ONCE(last_ackt.v64 == 0);
- if (!first_ackt.v64)
+ WARN_ON_ONCE(last_ackt == 0);
+ if (!first_ackt)
first_ackt = last_ackt;
last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
@@ -3122,7 +3126,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->delivered += acked_pcount;
if (!tcp_skb_spurious_retrans(tp, skb))
tcp_rack_advance(tp, sacked, scb->end_seq,
- &skb->skb_mstamp);
+ skb->skb_mstamp);
}
if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount;
@@ -3165,13 +3169,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
- if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
- seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt);
- ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt);
+ if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
+ seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
+ ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
}
- if (sack->first_sackt.v64) {
- sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt);
- ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt);
+ if (sack->first_sackt) {
+ sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
+ ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
}
sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet, or -1 */
rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
@@ -3201,7 +3205,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
- sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) {
+ sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
@@ -3390,7 +3394,7 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
u32 *last_oow_ack_time)
{
if (*last_oow_ack_time) {
- s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+ s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
NET_INC_STATS(net, mib_idx);
@@ -3398,7 +3402,7 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
}
}
- *last_oow_ack_time = tcp_time_stamp;
+ *last_oow_ack_time = tcp_jiffies32;
return false; /* not rate-limited: go ahead, send dupack now! */
}
@@ -3553,7 +3557,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int acked = 0; /* Number of packets newly acked */
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
- sack_state.first_sackt.v64 = 0;
+ sack_state.first_sackt = 0;
sack_state.rate = &rs;
/* We very likely will need to access write queue head. */
@@ -3565,7 +3569,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (before(ack, prior_snd_una)) {
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
if (before(ack, prior_snd_una - tp->max_window)) {
- tcp_send_challenge_ack(sk, skb);
+ if (!(flag & FLAG_NO_CHALLENGE_ACK))
+ tcp_send_challenge_ack(sk, skb);
return -1;
}
goto old_ack;
@@ -3636,7 +3641,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
*/
sk->sk_err_soft = 0;
icsk->icsk_probes_out = 0;
- tp->rcv_tstamp = tcp_time_stamp;
+ tp->rcv_tstamp = tcp_jiffies32;
if (!prior_packets)
goto no_queue;
@@ -5019,7 +5024,7 @@ static void tcp_new_space(struct sock *sk)
if (tcp_should_expand_sndbuf(sk)) {
tcp_sndbuf_expand(sk);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
sk->sk_write_space(sk);
@@ -5356,7 +5361,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
if (unlikely(!sk->sk_rx_dst))
inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
@@ -5554,7 +5559,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_set_state(sk, TCP_ESTABLISHED);
- icsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ icsk->icsk_ack.lrcvtime = tcp_jiffies32;
if (skb) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5571,7 +5576,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
/* Prevent spurious tcp_cwnd_restart() on first data
* packet.
*/
- tp->lsndtime = tcp_time_stamp;
+ tp->lsndtime = tcp_jiffies32;
tcp_init_buffer_space(sk);
@@ -5672,7 +5677,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
- tcp_time_stamp)) {
+ tcp_time_stamp(tp))) {
NET_INC_STATS(sock_net(sk),
LINUX_MIB_PAWSACTIVEREJECTED);
goto reset_and_undo;
@@ -5917,7 +5922,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_SYN_SENT:
tp->rx_opt.saw_tstamp = 0;
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
queued = tcp_rcv_synsent_state_process(sk, skb, th);
if (queued >= 0)
return queued;
@@ -5929,7 +5934,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
return 0;
}
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
tp->rx_opt.saw_tstamp = 0;
req = tp->fastopen_rsk;
if (req) {
@@ -5948,13 +5953,17 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
/* step 5: check the ACK field */
acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT) > 0;
+ FLAG_UPDATE_TS_RECENT |
+ FLAG_NO_CHALLENGE_ACK) > 0;
+ if (!acceptable) {
+ if (sk->sk_state == TCP_SYN_RECV)
+ return 1; /* send one RST */
+ tcp_send_challenge_ack(sk, skb);
+ goto discard;
+ }
switch (sk->sk_state) {
case TCP_SYN_RECV:
- if (!acceptable)
- return 1;
-
if (!tp->srtt_us)
tcp_synack_rtt_meas(sk, req);
@@ -6008,7 +6017,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_update_pacing_rate(sk);
/* Prevent spurious tcp_cwnd_restart() on first data packet */
- tp->lsndtime = tcp_time_stamp;
+ tp->lsndtime = tcp_jiffies32;
tcp_initialize_rcv_mss(sk);
tcp_fast_path_on(tp);
@@ -6023,14 +6032,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
* our SYNACK so stop the SYNACK timer.
*/
if (req) {
- /* Return RST if ack_seq is invalid.
- * Note that RFC793 only says to generate a
- * DUPACK for it but for TCP Fast Open it seems
- * better to treat this case like TCP_SYN_RECV
- * above.
- */
- if (!acceptable)
- return 1;
/* We no longer need the request sock. */
reqsk_fastopen_remove(sk, req, false);
tcp_rearm_rto(sk);
@@ -6202,7 +6203,7 @@ static void tcp_openreq_init(struct request_sock *req,
req->cookie_ts = 0;
tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- skb_mstamp_get(&tcp_rsk(req)->snt_synack);
+ tcp_rsk(req)->snt_synack = tcp_clock_us();
tcp_rsk(req)->last_oow_ack_time = 0;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5ab2aac5ca19..191b2f78b19d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -376,8 +376,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
struct sock *sk;
struct sk_buff *skb;
struct request_sock *fastopen;
- __u32 seq, snd_una;
- __u32 remaining;
+ u32 seq, snd_una;
+ s32 remaining;
+ u32 delta_us;
int err;
struct net *net = dev_net(icmp_skb->dev);
@@ -483,11 +484,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
skb = tcp_write_queue_head(sk);
BUG_ON(!skb);
+ tcp_mstamp_refresh(tp);
+ delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
remaining = icsk->icsk_rto -
- min(icsk->icsk_rto,
- tcp_time_stamp - tcp_skb_timestamp(skb));
+ usecs_to_jiffies(delta_us);
- if (remaining) {
+ if (remaining > 0) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
remaining, TCP_RTO_MAX);
} else {
@@ -811,7 +813,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_v4_send_ack(sk, skb,
tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcp_time_stamp + tcptw->tw_ts_offset,
+ tcp_time_stamp_raw() + tcptw->tw_ts_offset,
tcptw->tw_ts_recent,
tw->tw_bound_dev_if,
tcp_twsk_md5_key(tcptw),
@@ -839,7 +841,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp + tcp_rsk(req)->ts_off,
+ tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req->ts_recent,
0,
tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index d6fb6c067af4..ae10ed64fe13 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -37,7 +37,7 @@
#include <net/tcp.h>
/* resolution of owd */
-#define LP_RESOL 1000
+#define LP_RESOL TCP_TS_HZ
/**
* enum tcp_lp_state
@@ -147,9 +147,9 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
tp->rx_opt.rcv_tsecr == lp->local_ref_time)
goto out;
- m = HZ * (tp->rx_opt.rcv_tsval -
- lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
- lp->local_ref_time);
+ m = TCP_TS_HZ *
+ (tp->rx_opt.rcv_tsval - lp->remote_ref_time) /
+ (tp->rx_opt.rcv_tsecr - lp->local_ref_time);
if (m < 0)
m = -m;
@@ -194,7 +194,7 @@ static u32 tcp_lp_owd_calculator(struct sock *sk)
if (lp->flag & LP_VALID_RHZ) {
owd =
tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
- tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ);
+ tp->rx_opt.rcv_tsecr * (LP_RESOL / TCP_TS_HZ);
if (owd < 0)
owd = -owd;
}
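tcp-lp now estimates the peer's timestamp clock and the one-way delay in TCP_TS_HZ units. A standalone walk-through of the two formulas above, with all values illustrative:

#include <stdio.h>

#define TCP_TS_HZ 1000
#define LP_RESOL  TCP_TS_HZ

int main(void)
{
	/* Reference pair and a later pair of (remote tsval, local tsecr): */
	long remote_ref = 70000, local_ref = 140000;
	long tsval = 70500, tsecr = 141000;

	/* Peer ticked 500 while we ticked 1000 -> peer runs at ~500 Hz. */
	long rhz = (long)TCP_TS_HZ * (tsval - remote_ref) / (tsecr - local_ref);

	/* One-way delay, both clocks scaled to LP_RESOL units. */
	long owd = tsval * (LP_RESOL / rhz) - tsecr * (LP_RESOL / TCP_TS_HZ);
	if (owd < 0)
		owd = -owd;

	printf("remote_hz=%ld owd=%ld\n", rhz, owd);  /* 500, 0 here */
	return 0;
}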
@@ -264,18 +264,19 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
+ u32 now = tcp_time_stamp(tp);
u32 delta;
if (sample->rtt_us > 0)
tcp_lp_rtt_sample(sk, sample->rtt_us);
/* calc inference */
- delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+ delta = now - tp->rx_opt.rcv_tsecr;
if ((s32)delta > 0)
lp->inference = 3 * delta;
/* test if within inference */
- if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
+ if (lp->last_drop && (now - lp->last_drop < lp->inference))
lp->flag |= LP_WITHIN_INF;
else
lp->flag &= ~LP_WITHIN_INF;
@@ -312,7 +313,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);
/* record this drop time */
- lp->last_drop = tcp_time_stamp;
+ lp->last_drop = now;
}
static struct tcp_congestion_ops tcp_lp __read_mostly = {
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 653bbd67e3a3..102b2c90bb80 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -524,7 +524,7 @@ reset:
tp->snd_cwnd = 1;
else
tp->snd_cwnd = tcp_init_cwnd(tp, dst);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 717be4de5324..d0642df73044 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -445,9 +445,9 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->srtt_us = 0;
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
+ minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
- newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
newtp->packets_out = 0;
newtp->retrans_out = 0;
@@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->fackets_out = 0;
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
newtp->tlp_high_seq = 0;
- newtp->lsndtime = treq->snt_synack.stamp_jiffies;
+ newtp->lsndtime = tcp_jiffies32;
newsk->sk_txhash = treq->txhash;
newtp->last_oow_ack_time = 0;
newtp->total_retrans = req->num_retrans;
@@ -526,7 +526,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->fastopen_req = NULL;
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
- newtp->rack.mstamp.v64 = 0;
+ newtp->rack.mstamp = 0;
newtp->rack.advanced = 0;
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index 5de82a8d4d87..6d650ed3cb59 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -424,8 +424,8 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
}
/* Extract info for Tcp socket info provided via netlink */
-size_t tcpnv_get_info(struct sock *sk, u32 ext, int *attr,
- union tcp_cc_info *info)
+static size_t tcpnv_get_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
{
const struct tcpnv *ca = inet_csk_ca(sk);
@@ -440,7 +440,6 @@ size_t tcpnv_get_info(struct sock *sk, u32 ext, int *attr,
}
return 0;
}
-EXPORT_SYMBOL_GPL(tcpnv_get_info);
static struct tcp_congestion_ops tcpnv __read_mostly = {
.init = tcpnv_init,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4858e190f6ac..478f75baee31 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -151,7 +151,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)
while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
cwnd >>= 1;
tp->snd_cwnd = max(cwnd, restart_cwnd);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
tp->snd_cwnd_used = 0;
}
@@ -160,7 +160,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- const u32 now = tcp_time_stamp;
+ const u32 now = tcp_jiffies32;
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
@@ -904,6 +904,72 @@ out:
sk_free(sk);
}
+/* Note: called in hard-irq context;
+ * we cannot call into the TCP stack right away.
+ */
+enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
+{
+ struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
+ struct sock *sk = (struct sock *)tp;
+ unsigned long nval, oval;
+
+ for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
+ struct tsq_tasklet *tsq;
+ bool empty;
+
+ if (oval & TSQF_QUEUED)
+ break;
+
+ nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
+ nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
+ if (nval != oval)
+ continue;
+
+ if (!atomic_inc_not_zero(&sk->sk_wmem_alloc))
+ break;
+ /* queue this socket to tasklet queue */
+ tsq = this_cpu_ptr(&tsq_tasklet);
+ empty = list_empty(&tsq->head);
+ list_add(&tp->tsq_node, &tsq->head);
+ if (empty)
+ tasklet_schedule(&tsq->tasklet);
+ break;
+ }
+ return HRTIMER_NORESTART;
+}
+
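The loop above is a lock-free set-flags-unless-already-queued pattern: retry the cmpxchg until either TSQF_QUEUED is observed or we install QUEUED|DEFERRED ourselves. A self-contained sketch of the same pattern with C11 atomics (the bit values are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

#define TSQF_THROTTLED 0x1
#define TSQF_QUEUED    0x2
#define TSQ_DEFERRED   0x4   /* illustrative bit assignments */

static int try_queue(_Atomic unsigned long *flags)
{
	unsigned long oval = atomic_load(flags), nval;

	do {
		if (oval & TSQF_QUEUED)  /* someone already queued us */
			return 0;
		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TSQ_DEFERRED;
		/* on CAS failure, oval is reloaded and the loop retries */
	} while (!atomic_compare_exchange_weak(flags, &oval, nval));

	return 1;                        /* we won; schedule the tasklet */
}

int main(void)
{
	_Atomic unsigned long flags = TSQF_THROTTLED;
	int first  = try_queue(&flags);  /* 1: we queued it */
	int second = try_queue(&flags);  /* 0: already queued */

	printf("first=%d second=%d\n", first, second);
	return 0;
}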
+/* BBR congestion control needs pacing.
+ * Same remark for SO_MAX_PACING_RATE.
+ * sch_fq packet scheduler is efficiently handling pacing,
+ * but is not always installed/used.
+ * Return true if TCP stack should pace packets itself.
+ */
+static bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+ return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
+static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
+{
+ u64 len_ns;
+ u32 rate;
+
+ if (!tcp_needs_internal_pacing(sk))
+ return;
+ rate = sk->sk_pacing_rate;
+ if (!rate || rate == ~0U)
+ return;
+
+ /* Should account for header sizes as sch_fq does,
+ * but let's make things simple.
+ */
+ len_ns = (u64)skb->len * NSEC_PER_SEC;
+ do_div(len_ns, rate);
+ hrtimer_start(&tcp_sk(sk)->pacing_timer,
+ ktime_add_ns(ktime_get(), len_ns),
+ HRTIMER_MODE_ABS_PINNED);
+}
+
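The delay computed by tcp_internal_pacing() is simply skb->len divided by sk_pacing_rate, expressed in nanoseconds. Worked numbers (the rate is assumed for illustration):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull

int main(void)
{
	uint32_t rate = 12500000;  /* sk_pacing_rate, bytes/sec (~100 Mbit) */
	uint32_t len  = 1500;      /* skb->len */

	uint64_t len_ns = (uint64_t)len * NSEC_PER_SEC / rate;

	/* 1500 B at 12.5 MB/s -> 120000 ns: the hrtimer fires 120 us later. */
	printf("len_ns = %llu\n", (unsigned long long)len_ns);
	return 0;
}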
/* This routine actually transmits TCP packets queued in by
* tcp_do_sendmsg(). This is used by both the initial
* transmission and possible later retransmissions.
@@ -931,8 +997,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
BUG_ON(!skb || !tcp_skb_pcount(skb));
tp = tcp_sk(sk);
+ skb->skb_mstamp = tp->tcp_mstamp;
if (clone_it) {
- skb_mstamp_get(&skb->skb_mstamp);
TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
- tp->snd_una;
tcp_rate_skb_sent(sk, skb);
@@ -1034,6 +1100,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);
tp->data_segs_out += tcp_skb_pcount(skb);
+ tcp_internal_pacing(sk, skb);
}
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
@@ -1408,7 +1475,7 @@ void tcp_mtup_init(struct sock *sk)
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
icsk->icsk_mtup.probe_size = 0;
if (icsk->icsk_mtup.enabled)
- icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+ icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
}
EXPORT_SYMBOL(tcp_mtup_init);
@@ -1509,7 +1576,7 @@ static void tcp_cwnd_application_limited(struct sock *sk)
}
tp->snd_cwnd_used = 0;
}
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
}
static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
@@ -1530,14 +1597,14 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
if (tcp_is_cwnd_limited(sk)) {
/* Network is fed fully. */
tp->snd_cwnd_used = 0;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_jiffies32;
} else {
/* Network starves. */
if (tp->packets_out > tp->snd_cwnd_used)
tp->snd_cwnd_used = tp->packets_out;
if (sysctl_tcp_slow_start_after_idle &&
- (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
+ (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
!ca_ops->cong_control)
tcp_cwnd_application_limited(sk);
@@ -1839,7 +1906,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 age, send_win, cong_win, limit, in_flight;
struct tcp_sock *tp = tcp_sk(sk);
- struct skb_mstamp now;
struct sk_buff *head;
int win_divisor;
@@ -1852,7 +1918,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
/* Avoid bursty behavior by allowing defer
* only if the last write was recent.
*/
- if ((s32)(tcp_time_stamp - tp->lsndtime) > 0)
+ if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
goto send_now;
in_flight = tcp_packets_in_flight(tp);
@@ -1895,8 +1961,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
}
head = tcp_write_queue_head(sk);
- skb_mstamp_get(&now);
- age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
+
+ age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
/* If next ACK is likely to come too late (half srtt), do not defer */
if (age < (tp->srtt_us >> 4))
goto send_now;
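tp->srtt_us stores the smoothed RTT scaled by 8, so the (tp->srtt_us >> 4) test above is the "half srtt" the comment mentions. A quick check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t srtt_actual_us = 40000;           /* smoothed RTT: 40 ms */
	uint32_t srtt_us = srtt_actual_us << 3;    /* stored scaled by 8 */

	uint32_t half_srtt = srtt_us >> 4;         /* 8x / 16 = half an RTT */

	printf("half srtt = %u us\n", half_srtt);  /* 20000 */
	return 0;
}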
@@ -1921,7 +1987,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
s32 delta;
interval = net->ipv4.sysctl_tcp_probe_interval;
- delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
+ delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
if (unlikely(delta >= interval * HZ)) {
int mss = tcp_current_mss(sk);
@@ -1933,7 +1999,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
/* Update probe time stamp */
- icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+ icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
}
}
@@ -2086,6 +2152,12 @@ static int tcp_mtu_probe(struct sock *sk)
return -1;
}
+static bool tcp_pacing_check(const struct sock *sk)
+{
+ return tcp_needs_internal_pacing(sk) &&
+ hrtimer_active(&tcp_sk(sk)->pacing_timer);
+}
+
/* TCP Small Queues :
* Control the number of packets in qdisc/devices to two packets or ~1 ms of data.
* (These limits are doubled for retransmits)
@@ -2130,7 +2202,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
{
- const u32 now = tcp_time_stamp;
+ const u32 now = tcp_jiffies32;
if (tp->chrono_type > TCP_CHRONO_UNSPEC)
tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start;
@@ -2207,15 +2279,19 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
max_segs = tcp_tso_segs(sk, mss_now);
+ tcp_mstamp_refresh(tp);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
+ if (tcp_pacing_check(sk))
+ break;
+
tso_segs = tcp_init_tso_segs(skb, mss_now);
BUG_ON(!tso_segs);
if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
/* "skb_mstamp" is used as a start point for the retransmit timer */
- skb_mstamp_get(&skb->skb_mstamp);
+ skb->skb_mstamp = tp->tcp_mstamp;
goto repair; /* Skip network transmission */
}
@@ -2342,10 +2418,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
timeout = max_t(u32, timeout, msecs_to_jiffies(10));
/* If RTO is shorter, just schedule TLP in its place. */
- tlp_time_stamp = tcp_time_stamp + timeout;
+ tlp_time_stamp = tcp_jiffies32 + timeout;
rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
- s32 delta = rto_time_stamp - tcp_time_stamp;
+ s32 delta = rto_time_stamp - tcp_jiffies32;
if (delta > 0)
timeout = delta;
}
@@ -2803,7 +2879,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
skb_headroom(skb) >= 0xFFFF)) {
struct sk_buff *nskb;
- skb_mstamp_get(&skb->skb_mstamp);
+ skb->skb_mstamp = tp->tcp_mstamp;
nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-ENOBUFS;
@@ -2878,6 +2954,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (skb == tcp_send_head(sk))
break;
+
+ if (tcp_pacing_check(sk))
+ break;
+
/* we could do better than to assign each time */
if (!hole)
tp->retransmit_skb_hint = skb;
@@ -3015,7 +3095,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
skb_reserve(skb, MAX_TCP_HEADER);
tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
TCPHDR_ACK | TCPHDR_RST);
- skb_mstamp_get(&skb->skb_mstamp);
+ tcp_mstamp_refresh(tcp_sk(sk));
/* Send it off. */
if (tcp_transmit_skb(sk, skb, 0, priority))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -3111,10 +3191,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
+ skb->skb_mstamp = cookie_init_timestamp(req);
else
#endif
- skb_mstamp_get(&skb->skb_mstamp);
+ skb->skb_mstamp = tcp_clock_us();
#ifdef CONFIG_TCP_MD5SIG
rcu_read_lock();
@@ -3244,7 +3324,7 @@ static void tcp_connect_init(struct sock *sk)
if (likely(!tp->repair))
tp->rcv_nxt = 0;
else
- tp->rcv_tstamp = tcp_time_stamp;
+ tp->rcv_tstamp = tcp_jiffies32;
tp->rcv_wup = tp->rcv_nxt;
tp->copied_seq = tp->rcv_nxt;
@@ -3373,7 +3453,8 @@ int tcp_connect(struct sock *sk)
return -ENOBUFS;
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
- tp->retrans_stamp = tcp_time_stamp;
+ tcp_mstamp_refresh(tp);
+ tp->retrans_stamp = tcp_time_stamp(tp);
tcp_connect_queue_skb(sk, buff);
tcp_ecn_send_syn(sk, buff);
@@ -3492,7 +3573,6 @@ void tcp_send_ack(struct sock *sk)
skb_set_tcp_pure_ack(buff);
/* Send it off, this clears delayed acks for us. */
- skb_mstamp_get(&buff->skb_mstamp);
tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
}
EXPORT_SYMBOL_GPL(tcp_send_ack);
@@ -3526,15 +3606,16 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
* send it.
*/
tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
- skb_mstamp_get(&skb->skb_mstamp);
NET_INC_STATS(sock_net(sk), mib);
return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
}
+/* Called from setsockopt(... TCP_REPAIR) */
void tcp_send_window_probe(struct sock *sk)
{
if (sk->sk_state == TCP_ESTABLISHED) {
tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+ tcp_mstamp_refresh(tcp_sk(sk));
tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
}
}
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index c6a9fa894646..ad99569d4c1e 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -78,7 +78,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
- if (!scb->tx.delivered_mstamp.v64)
+ if (!scb->tx.delivered_mstamp)
return;
if (!rs->prior_delivered ||
@@ -89,9 +89,9 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
rs->is_retrans = scb->sacked & TCPCB_RETRANS;
/* Find the duration of the "send phase" of this window: */
- rs->interval_us = skb_mstamp_us_delta(
- &skb->skb_mstamp,
- &scb->tx.first_tx_mstamp);
+ rs->interval_us = tcp_stamp_us_delta(
+ skb->skb_mstamp,
+ scb->tx.first_tx_mstamp);
/* Record send time of most recently ACKed packet: */
tp->first_tx_mstamp = skb->skb_mstamp;
@@ -101,7 +101,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
* we don't need to reset since it'll be freed soon.
*/
if (scb->sacked & TCPCB_SACKED_ACKED)
- scb->tx.delivered_mstamp.v64 = 0;
+ scb->tx.delivered_mstamp = 0;
}
/* Update the connection delivery information and generate a rate sample. */
@@ -125,7 +125,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
rs->losses = lost; /* freshly marked lost */
/* Return an invalid sample if no timing information is available. */
- if (!rs->prior_mstamp.v64) {
+ if (!rs->prior_mstamp) {
rs->delivered = -1;
rs->interval_us = -1;
return;
@@ -138,8 +138,8 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
* longer phase.
*/
snd_us = rs->interval_us; /* send phase */
- ack_us = skb_mstamp_us_delta(&tp->tcp_mstamp,
- &rs->prior_mstamp); /* ack phase */
+ ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
+ rs->prior_mstamp); /* ack phase */
rs->interval_us = max(snd_us, ack_us);
/* Normally we expect interval_us >= min-rtt.
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 362b8c75bfab..fe9a493d0208 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -17,12 +17,9 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
}
}
-static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
- const struct skb_mstamp *t2,
- u32 seq1, u32 seq2)
+static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
- return skb_mstamp_after(t1, t2) ||
- (t1->v64 == t2->v64 && after(seq1, seq2));
+ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}
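With the move to u64 microsecond stamps, skbs from one TSO burst can carry identical timestamps, so the sequence-number tie-break matters. A standalone check of the simplified predicate:

#include <stdint.h>
#include <stdio.h>

/* after(): seq1 is later than seq2 in 32-bit wraparound arithmetic. */
static int after(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) > 0;
}

static int sent_after(uint64_t t1, uint64_t t2, uint32_t seq1, uint32_t seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

int main(void)
{
	/* Same transmit stamp (one TSO burst): higher end_seq wins. */
	printf("%d\n", sent_after(5000000, 5000000, 3000, 1500));  /* 1 */
	printf("%d\n", sent_after(5000000, 5000001, 3000, 1500));  /* 0 */
	return 0;
}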
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -72,14 +69,14 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
scb->sacked & TCPCB_SACKED_ACKED)
continue;
- if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
+ if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
tp->rack.end_seq, scb->end_seq)) {
/* Step 3 in draft-cheng-tcpm-rack-00.txt:
* A packet is lost if its elapsed time is beyond
* the recent RTT plus the reordering window.
*/
- u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
- &skb->skb_mstamp);
+ u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
+ skb->skb_mstamp);
s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
if (remaining < 0) {
@@ -127,16 +124,16 @@ void tcp_rack_mark_lost(struct sock *sk)
* draft-cheng-tcpm-rack-00.txt
*/
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
- const struct skb_mstamp *xmit_time)
+ u64 xmit_time)
{
u32 rtt_us;
- if (tp->rack.mstamp.v64 &&
- !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
+ if (tp->rack.mstamp &&
+ !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
end_seq, tp->rack.end_seq))
return;
- rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
+ rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
if (sacked & TCPCB_RETRANS) {
/* If the sacked packet was retransmitted, it's ambiguous
* whether the retransmission or the original (or the prior
@@ -152,7 +149,7 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
return;
}
tp->rack.rtt_us = rtt_us;
- tp->rack.mstamp = *xmit_time;
+ tp->rack.mstamp = xmit_time;
tp->rack.end_seq = end_seq;
tp->rack.advanced = 1;
}
@@ -166,7 +163,6 @@ void tcp_rack_reo_timeout(struct sock *sk)
u32 timeout, prior_inflight;
prior_inflight = tcp_packets_in_flight(tp);
- skb_mstamp_get(&tp->tcp_mstamp);
tcp_rack_detect_loss(sk, &timeout);
if (prior_inflight != tcp_packets_in_flight(tp)) {
if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 14672543cf0b..c0feeeef962a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -63,7 +63,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
/* If peer does not open window for long time, or did not transmit
* anything for long time, penalize it. */
- if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
+ if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
shift++;
/* If some dubious ICMP arrived, penalize even more. */
@@ -73,7 +73,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
if (tcp_check_oom(sk, shift)) {
/* Catch exceptional cases, when connection requires reset.
* 1. Last segment was sent recently. */
- if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
+ if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
/* 2. Window is closed. */
(!tp->snd_wnd && !tp->packets_out))
do_reset = true;
@@ -115,7 +115,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
if (net->ipv4.sysctl_tcp_mtu_probing) {
if (!icsk->icsk_mtup.enabled) {
icsk->icsk_mtup.enabled = 1;
- icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+ icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
} else {
struct net *net = sock_net(sk);
@@ -139,22 +139,18 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
* @timeout: A custom timeout value.
* If set to 0 the default timeout is calculated and used,
* based on TCP_RTO_MIN and the number of unsuccessful retransmits.
- * @syn_set: true if the SYN Bit was set.
*
* The default "timeout" value this function can calculate and use
* is equivalent to the timeout of a TCP Connection
* after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
- * syn_set flag is set.
- *
+ * retransmissions with an initial RTO of TCP_RTO_MIN.
*/
static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
- unsigned int timeout,
- bool syn_set)
+ unsigned int timeout)
{
+ const unsigned int rto_base = TCP_RTO_MIN;
unsigned int linear_backoff_thresh, start_ts;
- unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
if (!inet_csk(sk)->icsk_retransmits)
return false;
@@ -172,7 +168,7 @@ static bool retransmits_timed_out(struct sock *sk,
timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
}
- return (tcp_time_stamp - start_ts) >= timeout;
+ return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
}
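/* Editorial aside -- a worked example of the formula above, not part of
 * the patch.  With rto_base = TCP_RTO_MIN (200 ms) and TCP_RTO_MAX of
 * 120 s, linear_backoff_thresh = ilog2(120000 / 200) = 9: the first nine
 * doubling retransmissions contribute ((2 << 9) - 1) * 200 ms ~= 204.6 s,
 * and each retransmission past the threshold adds a flat TCP_RTO_MAX.
 */
static unsigned int sketch_retrans_timeout(unsigned int boundary,
					   unsigned int rto_base,
					   unsigned int rto_max)
{
	unsigned int thresh = ilog2(rto_max / rto_base);

	if (boundary <= thresh)
		return ((2 << boundary) - 1) * rto_base;
	return ((2 << thresh) - 1) * rto_base +
	       (boundary - thresh) * rto_max;
}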
/* A write timeout has occurred. Process the after effects. */
@@ -181,8 +177,8 @@ static int tcp_write_timeout(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
+ bool expired, do_reset;
int retry_until;
- bool do_reset, syn_set = false;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits) {
@@ -196,9 +192,9 @@ static int tcp_write_timeout(struct sock *sk)
sk_rethink_txhash(sk);
}
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
- syn_set = true;
+ expired = icsk->icsk_retransmits >= retry_until;
} else {
- if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) {
+ if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
/* Some middle-boxes may black-hole Fast Open _after_
* the handshake. Therefore we conservatively disable
* Fast Open on this path on recurring timeouts after
@@ -224,15 +220,15 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
- !retransmits_timed_out(sk, retry_until, 0, 0);
+ !retransmits_timed_out(sk, retry_until, 0);
if (tcp_out_of_resources(sk, do_reset))
return 1;
}
+ expired = retransmits_timed_out(sk, retry_until,
+ icsk->icsk_user_timeout);
}
-
- if (retransmits_timed_out(sk, retry_until,
- syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
+ if (expired) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
@@ -339,9 +335,10 @@ static void tcp_probe_timer(struct sock *sk)
*/
start_ts = tcp_skb_timestamp(tcp_send_head(sk));
if (!start_ts)
- skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
+ tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp;
else if (icsk->icsk_user_timeout &&
- (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
+ (s32)(tcp_time_stamp(tp) - start_ts) >
+ jiffies_to_msecs(icsk->icsk_user_timeout))
goto abort;
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
@@ -451,7 +448,7 @@ void tcp_retransmit_timer(struct sock *sk)
tp->snd_una, tp->snd_nxt);
}
#endif
- if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
+ if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
tcp_write_err(sk);
goto out;
}
@@ -539,7 +536,7 @@ out_reset_timer:
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
- if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0))
+ if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
__sk_dst_reset(sk);
out:;
@@ -561,6 +558,7 @@ void tcp_write_timer_handler(struct sock *sk)
goto out;
}
+ tcp_mstamp_refresh(tcp_sk(sk));
event = icsk->icsk_pending;
switch (event) {
@@ -710,4 +708,7 @@ void tcp_init_xmit_timers(struct sock *sk)
{
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
&tcp_keepalive_timer);
+ hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED);
+ tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
}
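/* Editorial aside -- not part of the patch: the pacing timer added above
 * follows the standard hrtimer pattern.  A minimal stand-alone sketch,
 * with a hypothetical callback:
 */
static struct hrtimer sketch_timer;

static enum hrtimer_restart sketch_timer_fn(struct hrtimer *t)
{
	/* kick the transmit path here, then don't rearm */
	return HRTIMER_NORESTART;
}

static void sketch_timer_setup(void)
{
	hrtimer_init(&sketch_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	sketch_timer.function = sketch_timer_fn;
}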
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 9775453b8d17..bec9cafbe3f9 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -68,7 +68,7 @@ static void tcp_westwood_init(struct sock *sk)
w->cumul_ack = 0;
w->reset_rtt_min = 1;
w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
- w->rtt_win_sx = tcp_time_stamp;
+ w->rtt_win_sx = tcp_jiffies32;
w->snd_una = tcp_sk(sk)->snd_una;
w->first_ack = 1;
}
@@ -116,7 +116,7 @@ static void tcp_westwood_pkts_acked(struct sock *sk,
static void westwood_update_window(struct sock *sk)
{
struct westwood *w = inet_csk_ca(sk);
- s32 delta = tcp_time_stamp - w->rtt_win_sx;
+ s32 delta = tcp_jiffies32 - w->rtt_win_sx;
/* Initialize w->snd_una with the first acked sequence number in order
* to fix mismatch between tp->snd_una and w->snd_una for the first
@@ -140,7 +140,7 @@ static void westwood_update_window(struct sock *sk)
westwood_filter(w, delta);
w->bk = 0;
- w->rtt_win_sx = tcp_time_stamp;
+ w->rtt_win_sx = tcp_jiffies32;
}
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1d6219bf2d6b..fdcb7437cc15 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1164,22 +1164,32 @@ out:
}
/* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial)
+static void udp_rmem_release(struct sock *sk, int size, int partial,
+ bool rx_queue_lock_held)
{
struct udp_sock *up = udp_sk(sk);
+ struct sk_buff_head *sk_queue;
int amt;
if (likely(partial)) {
up->forward_deficit += size;
size = up->forward_deficit;
if (size < (sk->sk_rcvbuf >> 2) &&
- !skb_queue_empty(&sk->sk_receive_queue))
+ !skb_queue_empty(&up->reader_queue))
return;
} else {
size += up->forward_deficit;
}
up->forward_deficit = 0;
+ /* acquire the sk_receive_queue lock for fwd allocated memory scheduling,
+ * if the caller doesn't already hold it
+ */
+ sk_queue = &sk->sk_receive_queue;
+ if (!rx_queue_lock_held)
+ spin_lock(&sk_queue->lock);
+
sk->sk_forward_alloc += size;
amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
sk->sk_forward_alloc -= amt;
@@ -1188,19 +1198,31 @@ static void udp_rmem_release(struct sock *sk, int size, int partial)
__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
atomic_sub(size, &sk->sk_rmem_alloc);
+
+ /* this can save us from acquiring the rx queue lock on next receive */
+ skb_queue_splice_tail_init(sk_queue, &up->reader_queue);
+
+ if (!rx_queue_lock_held)
+ spin_unlock(&sk_queue->lock);
}
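/* Editorial aside -- an illustrative sketch, not part of the patch: the
 * forward_deficit logic at the top of udp_rmem_release() batches memory
 * reclaim.  A partial release only accumulates a deficit; the expensive
 * accounting runs once the deficit reaches a quarter of sk_rcvbuf or the
 * reader queue has drained:
 */
static bool sketch_should_reclaim(int deficit, int rcvbuf, bool queue_empty)
{
	return deficit >= (rcvbuf >> 2) || queue_empty;
}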
-/* Note: called with sk_receive_queue.lock held.
+/* Note: called with reader_queue.lock held.
* Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
* This avoids a cache line miss while receive_queue lock is held.
* Look at __udp_enqueue_schedule_skb() to find where this copy is done.
*/
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
- udp_rmem_release(sk, skb->dev_scratch, 1);
+ udp_rmem_release(sk, skb->dev_scratch, 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);
+/* as above, but the caller already holds the rx queue lock */
+static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
+{
+ udp_rmem_release(sk, skb->dev_scratch, 1, true);
+}
+
/* Idea of busylocks is to let producers grab an extra spinlock
* to relieve pressure on the receive_queue spinlock shared by consumer.
* Under flood, this means that only one producer can be in line
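/* Editorial aside -- a generic sketch of the busylock idea described
 * above, not the kernel's implementation (which hashes sockets to a
 * small array of such locks): contending producers serialize on a
 * secondary spinlock first, so at most one of them spins on the hot
 * receive-queue lock at any time:
 */
static DEFINE_SPINLOCK(sketch_busylock);

static void sketch_produce(spinlock_t *queue_lock)
{
	spin_lock(&sketch_busylock);	/* queue up behind other producers */
	spin_lock(queue_lock);		/* only one waiter contends here */
	/* ... enqueue the packet ... */
	spin_unlock(queue_lock);
	spin_unlock(&sketch_busylock);
}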
@@ -1306,14 +1328,16 @@ EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
void udp_destruct_sock(struct sock *sk)
{
/* reclaim completely the forward allocated memory */
+ struct udp_sock *up = udp_sk(sk);
unsigned int total = 0;
struct sk_buff *skb;
- while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+ skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
+ while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
total += skb->truesize;
kfree_skb(skb);
}
- udp_rmem_release(sk, total, 0);
+ udp_rmem_release(sk, total, 0, true);
inet_sock_destruct(sk);
}
@@ -1321,6 +1345,7 @@ EXPORT_SYMBOL_GPL(udp_destruct_sock);
int udp_init_sock(struct sock *sk)
{
+ skb_queue_head_init(&udp_sk(sk)->reader_queue);
sk->sk_destruct = udp_destruct_sock;
return 0;
}
@@ -1338,6 +1363,26 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
}
EXPORT_SYMBOL_GPL(skb_consume_udp);
+static struct sk_buff *__first_packet_length(struct sock *sk,
+ struct sk_buff_head *rcvq,
+ int *total)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_peek(rcvq)) != NULL &&
+ udp_lib_checksum_complete(skb)) {
+ __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
+ IS_UDPLITE(sk));
+ __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+ atomic_inc(&sk->sk_drops);
+ __skb_unlink(skb, rcvq);
+ *total += skb->truesize;
+ kfree_skb(skb);
+ }
+ return skb;
+}
+
/**
* first_packet_length - return length of first packet in receive queue
* @sk: socket
@@ -1347,26 +1392,24 @@ EXPORT_SYMBOL_GPL(skb_consume_udp);
*/
static int first_packet_length(struct sock *sk)
{
- struct sk_buff_head *rcvq = &sk->sk_receive_queue;
+ struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
+ struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
struct sk_buff *skb;
int total = 0;
int res;
spin_lock_bh(&rcvq->lock);
- while ((skb = skb_peek(rcvq)) != NULL &&
- udp_lib_checksum_complete(skb)) {
- __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
- IS_UDPLITE(sk));
- __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
- IS_UDPLITE(sk));
- atomic_inc(&sk->sk_drops);
- __skb_unlink(skb, rcvq);
- total += skb->truesize;
- kfree_skb(skb);
+ skb = __first_packet_length(sk, rcvq, &total);
+ if (!skb && !skb_queue_empty(sk_queue)) {
+ spin_lock(&sk_queue->lock);
+ skb_queue_splice_tail_init(sk_queue, rcvq);
+ spin_unlock(&sk_queue->lock);
+
+ skb = __first_packet_length(sk, rcvq, &total);
}
res = skb ? skb->len : -1;
if (total)
- udp_rmem_release(sk, total, 1);
+ udp_rmem_release(sk, total, 1, false);
spin_unlock_bh(&rcvq->lock);
return res;
}
@@ -1400,6 +1443,77 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
EXPORT_SYMBOL(udp_ioctl);
+struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
+ int noblock, int *peeked, int *off, int *err)
+{
+ struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+ struct sk_buff_head *queue;
+ struct sk_buff *last;
+ long timeo;
+ int error;
+
+ queue = &udp_sk(sk)->reader_queue;
+ flags |= noblock ? MSG_DONTWAIT : 0;
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ do {
+ struct sk_buff *skb;
+
+ error = sock_error(sk);
+ if (error)
+ break;
+
+ error = -EAGAIN;
+ *peeked = 0;
+ do {
+ spin_lock_bh(&queue->lock);
+ skb = __skb_try_recv_from_queue(sk, queue, flags,
+ udp_skb_destructor,
+ peeked, off, err,
+ &last);
+ if (skb) {
+ spin_unlock_bh(&queue->lock);
+ return skb;
+ }
+
+ if (skb_queue_empty(sk_queue)) {
+ spin_unlock_bh(&queue->lock);
+ goto busy_check;
+ }
+
+ /* refill the reader queue and walk it again;
+ * keep both queues locked to avoid re-acquiring
+ * the sk_receive_queue lock if fwd memory scheduling
+ * is needed.
+ */
+ spin_lock(&sk_queue->lock);
+ skb_queue_splice_tail_init(sk_queue, queue);
+
+ skb = __skb_try_recv_from_queue(sk, queue, flags,
+ udp_skb_dtor_locked,
+ peeked, off, err,
+ &last);
+ spin_unlock(&sk_queue->lock);
+ spin_unlock_bh(&queue->lock);
+ if (skb)
+ return skb;
+
+busy_check:
+ if (!sk_can_busy_loop(sk))
+ break;
+
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
+ } while (!skb_queue_empty(sk_queue));
+
+ /* sk_queue is empty, reader_queue may contain peeked packets */
+ } while (timeo &&
+ !__skb_wait_for_more_packets(sk, &error, &timeo,
+ (struct sk_buff *)sk_queue));
+
+ *err = error;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__skb_recv_udp);
+
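/* Editorial aside -- the consumer pattern implemented above, reduced to
 * a sketch (not part of the patch): the reader first walks its private
 * reader_queue and only when that is empty takes the shared
 * sk_receive_queue lock, splicing everything across in one shot before
 * retrying, so producers and the consumer rarely contend on one lock:
 */
static struct sk_buff *sketch_two_queue_dequeue(struct sk_buff_head *private_q,
						struct sk_buff_head *shared_q)
{
	struct sk_buff *skb;

	spin_lock(&private_q->lock);
	skb = __skb_dequeue(private_q);
	if (!skb && !skb_queue_empty(shared_q)) {
		spin_lock(&shared_q->lock);
		skb_queue_splice_tail_init(shared_q, private_q);
		spin_unlock(&shared_q->lock);
		skb = __skb_dequeue(private_q);
	}
	spin_unlock(&private_q->lock);
	return skb;
}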
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
@@ -1490,7 +1604,8 @@ try_again:
return err;
csum_copy_err:
- if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
+ if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
+ udp_skb_destructor)) {
UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
}
@@ -2325,6 +2440,9 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
unsigned int mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
+ if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+ mask |= POLLIN | POLLRDNORM;
+
sock_rps_record_flow(sk);
/* Check for false positives due to checksum errors */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6a4fb1e629fb..25443fd946a8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2280,7 +2280,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
cfg.fc_flags |= RTF_NONEXTHOP;
#endif
- ip6_route_add(&cfg);
+ ip6_route_add(&cfg, NULL);
}
@@ -2335,7 +2335,7 @@ static void addrconf_add_mroute(struct net_device *dev)
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
- ip6_route_add(&cfg);
+ ip6_route_add(&cfg, NULL);
}
static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index 9ea249b9451e..6de3c04b0f30 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -14,6 +14,8 @@
#include <net/udp.h>
#include <net/udp_tunnel.h>
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
+
static void fou6_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
struct flowi6 *fl6, u8 *protocol, __be16 sport)
{
@@ -33,8 +35,8 @@ static void fou6_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
*protocol = IPPROTO_UDP;
}
-int fou6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi6 *fl6)
+static int fou6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi6 *fl6)
{
__be16 sport;
int err;
@@ -49,10 +51,9 @@ int fou6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
return 0;
}
-EXPORT_SYMBOL(fou6_build_header);
-int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi6 *fl6)
+static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi6 *fl6)
{
__be16 sport;
int err;
@@ -67,9 +68,6 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
return 0;
}
-EXPORT_SYMBOL(gue6_build_header);
-
-#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
.encap_hlen = fou_encap_hlen,
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index b3df03e3faa0..f4a413aba423 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -91,7 +91,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
drop:
kfree_skb(skb);
- return -EINVAL;
+ return err;
}
static int ila_input(struct sk_buff *skb)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d4bf2c68a545..deea901746c8 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -473,7 +473,8 @@ out:
static struct fib6_node *fib6_add_1(struct fib6_node *root,
struct in6_addr *addr, int plen,
int offset, int allow_create,
- int replace_required, int sernum)
+ int replace_required, int sernum,
+ struct netlink_ext_ack *extack)
{
struct fib6_node *fn, *in, *ln;
struct fib6_node *pn = NULL;
@@ -497,6 +498,8 @@ static struct fib6_node *fib6_add_1(struct fib6_node *root,
!ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
if (!allow_create) {
if (replace_required) {
+ NL_SET_ERR_MSG(extack,
+ "Can not replace route - no match found");
pr_warn("Can't replace route, no match found\n");
return ERR_PTR(-ENOENT);
}
@@ -543,6 +546,8 @@ static struct fib6_node *fib6_add_1(struct fib6_node *root,
* That would keep IPv6 consistent with IPv4
*/
if (replace_required) {
+ NL_SET_ERR_MSG(extack,
+ "Can not replace route - no match found");
pr_warn("Can't replace route, no match found\n");
return ERR_PTR(-ENOENT);
}
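/* Editorial aside -- not part of the patch: NL_SET_ERR_MSG records a
 * pointer to a constant string in the extended ack, which the netlink
 * core returns to userspace alongside the errno, so tools can print a
 * human-readable reason.  A sketch of the usage pattern this file now
 * follows:
 */
static int sketch_validate(int value, struct netlink_ext_ack *extack)
{
	if (value < 0) {
		NL_SET_ERR_MSG(extack, "value must be non-negative");
		return -EINVAL;
	}
	return 0;
}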
@@ -964,7 +969,8 @@ void fib6_force_start_gc(struct net *net)
*/
int fib6_add(struct fib6_node *root, struct rt6_info *rt,
- struct nl_info *info, struct mx6_config *mxc)
+ struct nl_info *info, struct mx6_config *mxc,
+ struct netlink_ext_ack *extack)
{
struct fib6_node *fn, *pn = NULL;
int err = -ENOMEM;
@@ -987,7 +993,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
offsetof(struct rt6_info, rt6i_dst), allow_create,
- replace_required, sernum);
+ replace_required, sernum, extack);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
fn = NULL;
@@ -1028,7 +1034,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
- allow_create, replace_required, sernum);
+ allow_create, replace_required, sernum,
+ extack);
if (IS_ERR(sn)) {
/* If it is failed, discard just allocated
@@ -1047,7 +1054,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
- allow_create, replace_required, sernum);
+ allow_create, replace_required, sernum,
+ extack);
if (IS_ERR(sn)) {
err = PTR_ERR(sn);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index eedee5d108d9..f63b18e05c69 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -220,9 +220,6 @@ static bool reject6_csum_ok(struct sk_buff *skb, int hook)
__be16 fo;
u8 proto;
- if (skb->csum_bad)
- return false;
-
if (skb_csum_unnecessary(skb))
return true;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dc61b0b5e64e..2fe84bdc4e60 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -938,14 +938,15 @@ EXPORT_SYMBOL(rt6_lookup);
*/
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
- struct mx6_config *mxc)
+ struct mx6_config *mxc,
+ struct netlink_ext_ack *extack)
{
int err;
struct fib6_table *table;
table = rt->rt6i_table;
write_lock_bh(&table->tb6_lock);
- err = fib6_add(&table->tb6_root, rt, info, mxc);
+ err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
write_unlock_bh(&table->tb6_lock);
return err;
@@ -956,7 +957,7 @@ int ip6_ins_rt(struct rt6_info *rt)
struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
struct mx6_config mxc = { .mx = NULL, };
- return __ip6_ins_rt(rt, &info, &mxc);
+ return __ip6_ins_rt(rt, &info, &mxc, NULL);
}
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
@@ -1844,7 +1845,8 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
return rt;
}
-static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
+static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct net *net = cfg->fc_nlinfo.nl_net;
struct rt6_info *rt = NULL;
@@ -1855,14 +1857,25 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
int err = -EINVAL;
/* RTF_PCPU is an internal flag; can not be set by userspace */
- if (cfg->fc_flags & RTF_PCPU)
+ if (cfg->fc_flags & RTF_PCPU) {
+ NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
goto out;
+ }
- if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
+ if (cfg->fc_dst_len > 128) {
+ NL_SET_ERR_MSG(extack, "Invalid prefix length");
+ goto out;
+ }
+ if (cfg->fc_src_len > 128) {
+ NL_SET_ERR_MSG(extack, "Invalid source address length");
goto out;
+ }
#ifndef CONFIG_IPV6_SUBTREES
- if (cfg->fc_src_len)
+ if (cfg->fc_src_len) {
+ NL_SET_ERR_MSG(extack,
+ "Specifying source address requires IPV6_SUBTREES to be enabled");
goto out;
+ }
#endif
if (cfg->fc_ifindex) {
err = -ENODEV;
@@ -2013,9 +2026,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
err = -EINVAL;
if (ipv6_chk_addr_and_flags(net, gw_addr,
gwa_type & IPV6_ADDR_LINKLOCAL ?
- dev : NULL, 0, 0))
+ dev : NULL, 0, 0)) {
+ NL_SET_ERR_MSG(extack, "Invalid gateway address");
goto out;
-
+ }
rt->rt6i_gateway = *gw_addr;
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
@@ -2031,8 +2045,11 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
addressing
*/
if (!(gwa_type & (IPV6_ADDR_UNICAST |
- IPV6_ADDR_MAPPED)))
+ IPV6_ADDR_MAPPED))) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid gateway address");
goto out;
+ }
if (cfg->fc_table) {
grt = ip6_nh_lookup_table(net, cfg, gw_addr);
@@ -2072,8 +2089,14 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
goto out;
}
err = -EINVAL;
- if (!dev || (dev->flags & IFF_LOOPBACK))
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Egress device not specified");
+ goto out;
+ } else if (dev->flags & IFF_LOOPBACK) {
+ NL_SET_ERR_MSG(extack,
+ "Egress device can not be loopback device for this route");
goto out;
+ }
}
err = -ENODEV;
@@ -2082,6 +2105,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
+ NL_SET_ERR_MSG(extack, "Invalid source address");
err = -EINVAL;
goto out;
}
@@ -2111,13 +2135,14 @@ out:
return ERR_PTR(err);
}
-int ip6_route_add(struct fib6_config *cfg)
+int ip6_route_add(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct mx6_config mxc = { .mx = NULL, };
struct rt6_info *rt;
int err;
- rt = ip6_route_info_create(cfg);
+ rt = ip6_route_info_create(cfg, extack);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
@@ -2128,7 +2153,7 @@ int ip6_route_add(struct fib6_config *cfg)
if (err)
goto out;
- err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
+ err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
kfree(mxc.mx);
@@ -2222,7 +2247,8 @@ out_put:
return err;
}
-static int ip6_route_del(struct fib6_config *cfg)
+static int ip6_route_del(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct fib6_table *table;
struct fib6_node *fn;
@@ -2230,8 +2256,10 @@ static int ip6_route_del(struct fib6_config *cfg)
int err = -ESRCH;
table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
- if (!table)
+ if (!table) {
+ NL_SET_ERR_MSG(extack, "FIB table does not exist");
return err;
+ }
read_lock_bh(&table->tb6_lock);
@@ -2483,7 +2511,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
if (!prefixlen)
cfg.fc_flags |= RTF_DEFAULT;
- ip6_route_add(&cfg);
+ ip6_route_add(&cfg, NULL);
return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
}
@@ -2529,7 +2557,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
cfg.fc_gateway = *gwaddr;
- if (!ip6_route_add(&cfg)) {
+ if (!ip6_route_add(&cfg, NULL)) {
struct fib6_table *table;
table = fib6_get_table(dev_net(dev), cfg.fc_table);
@@ -2622,10 +2650,10 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
rtnl_lock();
switch (cmd) {
case SIOCADDRT:
- err = ip6_route_add(&cfg);
+ err = ip6_route_add(&cfg, NULL);
break;
case SIOCDELRT:
- err = ip6_route_del(&cfg);
+ err = ip6_route_del(&cfg, NULL);
break;
default:
err = -EINVAL;
@@ -2903,7 +2931,8 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct fib6_config *cfg)
+ struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
@@ -3097,7 +3126,8 @@ static void ip6_route_mpath_notify(struct rt6_info *rt,
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}
-static int ip6_route_multipath_add(struct fib6_config *cfg)
+static int ip6_route_multipath_add(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct rt6_info *rt_notif = NULL, *rt_last = NULL;
struct nl_info *info = &cfg->fc_nlinfo;
@@ -3145,7 +3175,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
r_cfg.fc_encap_type = nla_get_u16(nla);
}
- rt = ip6_route_info_create(&r_cfg);
+ rt = ip6_route_info_create(&r_cfg, extack);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
@@ -3170,7 +3200,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
err_nh = NULL;
list_for_each_entry(nh, &rt6_nh_list, next) {
rt_last = nh->rt6_info;
- err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc);
+ err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
/* save reference to first route for notification */
if (!rt_notif && !err)
rt_notif = nh->rt6_info;
@@ -3212,7 +3242,7 @@ add_errout:
list_for_each_entry(nh, &rt6_nh_list, next) {
if (err_nh == nh)
break;
- ip6_route_del(&nh->r_cfg);
+ ip6_route_del(&nh->r_cfg, extack);
}
cleanup:
@@ -3227,7 +3257,8 @@ cleanup:
return err;
}
-static int ip6_route_multipath_del(struct fib6_config *cfg)
+static int ip6_route_multipath_del(struct fib6_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
@@ -3254,7 +3285,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg)
r_cfg.fc_flags |= RTF_GATEWAY;
}
}
- err = ip6_route_del(&r_cfg);
+ err = ip6_route_del(&r_cfg, extack);
if (err)
last_err = err;
@@ -3270,15 +3301,15 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct fib6_config cfg;
int err;
- err = rtm_to_fib6_config(skb, nlh, &cfg);
+ err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
if (err < 0)
return err;
if (cfg.fc_mp)
- return ip6_route_multipath_del(&cfg);
+ return ip6_route_multipath_del(&cfg, extack);
else {
cfg.fc_delete_all_nh = 1;
- return ip6_route_del(&cfg);
+ return ip6_route_del(&cfg, extack);
}
}
@@ -3288,14 +3319,14 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct fib6_config cfg;
int err;
- err = rtm_to_fib6_config(skb, nlh, &cfg);
+ err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
if (err < 0)
return err;
if (cfg.fc_mp)
- return ip6_route_multipath_add(&cfg);
+ return ip6_route_multipath_add(&cfg, extack);
else
- return ip6_route_add(&cfg);
+ return ip6_route_add(&cfg, extack);
}
static size_t rt6_nlmsg_size(struct rt6_info *rt)
@@ -3576,11 +3607,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[RTA_MAX+1];
+ int err, iif = 0, oif = 0;
+ struct dst_entry *dst;
struct rt6_info *rt;
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi6 fl6;
- int err, iif = 0, oif = 0;
+ bool fibmatch;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
extack);
@@ -3591,6 +3624,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
memset(&fl6, 0, sizeof(fl6));
rtm = nlmsg_data(nlh);
fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
+ fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
if (tb[RTA_SRC]) {
if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
@@ -3636,12 +3670,23 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (!ipv6_addr_any(&fl6.saddr))
flags |= RT6_LOOKUP_F_HAS_SADDR;
- rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
- flags);
+ if (!fibmatch)
+ dst = ip6_route_input_lookup(net, dev, &fl6, flags);
} else {
fl6.flowi6_oif = oif;
- rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
+ if (!fibmatch)
+ dst = ip6_route_output(net, NULL, &fl6);
+ }
+
+ if (fibmatch)
+ dst = ip6_route_lookup(net, &fl6, 0);
+
+ rt = container_of(dst, struct rt6_info, dst);
+ if (rt->dst.error) {
+ err = rt->dst.error;
+ ip6_rt_put(rt);
+ goto errout;
}
if (rt == net->ipv6.ip6_null_entry) {
@@ -3658,10 +3703,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
}
skb_dst_set(skb, &rt->dst);
-
- err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
- RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0);
+ if (fibmatch)
+ err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
+ RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
+ else
+ err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
+ RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
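/* Editorial aside -- not part of the patch: with RTM_F_FIB_MATCH set in
 * rtm_flags, the lookup above returns the matching FIB entry itself
 * rather than the fully resolved dst.  iproute2 exposes this as, e.g.:
 *
 *	ip -6 route get fibmatch 2001:db8::1
 *
 * which reports the route that would be selected instead of the
 * resolved destination.
 */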
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 5f44ffed2576..15fba55e3da8 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -303,13 +303,9 @@ static int seg6_genl_dumphmac_done(struct netlink_callback *cb)
static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
{
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
- struct net *net = sock_net(skb->sk);
- struct seg6_pernet_data *sdata;
struct seg6_hmac_info *hinfo;
int ret;
- sdata = seg6_pernet(net);
-
ret = rhashtable_walk_start(iter);
if (ret && ret != -EAGAIN)
goto done;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5abc3692b901..971823359f5b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -211,7 +211,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
- treq->snt_synack.v64 = 0;
+ treq->snt_synack = 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->ts_off = 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4f4310a36a04..233edfabe1db 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -949,7 +949,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcp_time_stamp + tcptw->tw_ts_offset,
+ tcp_time_stamp_raw() + tcptw->tw_ts_offset,
tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
@@ -971,7 +971,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp + tcp_rsk(req)->ts_off,
+ tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req->ts_recent, sk->sk_bound_dev_if,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
0, 0);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 06ec39b79609..2e9b52bded2d 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -455,7 +455,8 @@ try_again:
return err;
csum_copy_err:
- if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
+ if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
+ udp_skb_destructor)) {
if (is_udp4) {
UDP_INC_STATS(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index deca20fb2ce2..da49191f7ad0 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1985,7 +1985,7 @@ static int kcm_create(struct net *net, struct socket *sock,
return 0;
}
-static struct net_proto_family kcm_family_ops = {
+static const struct net_proto_family kcm_family_ops = {
.family = PF_KCM,
.create = kcm_create,
.owner = THIS_MODULE,
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index a504e87c6ddf..49bd8bb16b18 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -152,7 +152,7 @@ void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
struct synproxy_options *opts)
{
opts->tsecr = opts->tsval;
- opts->tsval = tcp_time_stamp & ~0x3f;
+ opts->tsval = tcp_time_stamp_raw() & ~0x3f;
if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
opts->tsval |= opts->wscale;
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index 54e40fa47822..d3e594eb36d0 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -48,7 +48,7 @@ static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
return rc;
}
-static struct net_proto_family nfc_sock_family_ops = {
+static const struct net_proto_family nfc_sock_family_ops = {
.owner = THIS_MODULE,
.family = PF_NFC,
.create = nfc_sock_create,
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 7b17da9a94a0..9ddc9f8412a2 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -453,7 +453,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
/* Complete checksum if needed */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
- (err = skb_checksum_help(skb)))
+ (err = skb_csum_hwoffload_help(skb, 0)))
goto out;
/* Older versions of OVS user space enforce alignment of the last
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e3eeed19cc7a..82ca49fba336 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -196,8 +196,7 @@ static void *packet_previous_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
-static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
- struct tpacket_block_desc *);
+static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
@@ -721,7 +720,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
/* Case 1. Queue was frozen because user-space was
* lagging behind.
*/
- if (prb_curr_blk_in_use(pkc, pbd)) {
+ if (prb_curr_blk_in_use(pbd)) {
/*
* Ok, user-space is still behind.
* So just refresh the timer.
@@ -972,8 +971,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
}
}
-static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
- struct tpacket_block_desc *pbd)
+static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
return TP_STATUS_USER & BLOCK_STATUS(pbd);
}
@@ -1064,7 +1062,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
* Check if that last block which caused the queue to freeze,
* is still in_use by user-space.
*/
- if (prb_curr_blk_in_use(pkc, pbd)) {
+ if (prb_curr_blk_in_use(pbd)) {
/* Can't record this packet */
return NULL;
} else {
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index b9da4d6b914f..9c68d2f8ba39 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -19,6 +19,7 @@ rxrpc-y := \
local_event.o \
local_object.o \
misc.o \
+ net_ns.o \
output.o \
peer_event.o \
peer_object.o \
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 7fb59c3f1542..cd34ffbff1d1 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -38,9 +38,6 @@ MODULE_PARM_DESC(debug, "RxRPC debugging mask");
static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;
-/* local epoch for detecting local-end reset */
-u32 rxrpc_epoch;
-
/* current debugging ID */
atomic_t rxrpc_debug_id;
@@ -155,7 +152,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
memcpy(&rx->srx, srx, sizeof(rx->srx));
- local = rxrpc_lookup_local(&rx->srx);
+ local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
if (IS_ERR(local)) {
ret = PTR_ERR(local);
goto error_unlock;
@@ -434,7 +431,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
ret = -EAFNOSUPPORT;
goto error_unlock;
}
- local = rxrpc_lookup_local(&rx->srx);
+ local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
if (IS_ERR(local)) {
ret = PTR_ERR(local);
goto error_unlock;
@@ -582,9 +579,6 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
_enter("%p,%d", sock, protocol);
- if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
-
/* we support transport protocol UDP/UDP6 only */
if (protocol != PF_INET &&
IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol != PF_INET6)
@@ -780,8 +774,6 @@ static int __init af_rxrpc_init(void)
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
- get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
- rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
get_random_bytes(&tmp, sizeof(tmp));
tmp &= 0x3fffffff;
if (tmp == 0)
@@ -809,6 +801,10 @@ static int __init af_rxrpc_init(void)
goto error_security;
}
+ ret = register_pernet_subsys(&rxrpc_net_ops);
+ if (ret)
+ goto error_pernet;
+
ret = proto_register(&rxrpc_proto, 1);
if (ret < 0) {
pr_crit("Cannot register protocol\n");
@@ -839,11 +835,6 @@ static int __init af_rxrpc_init(void)
goto error_sysctls;
}
-#ifdef CONFIG_PROC_FS
- proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
- proc_create("rxrpc_conns", 0, init_net.proc_net,
- &rxrpc_connection_seq_fops);
-#endif
return 0;
error_sysctls:
@@ -855,6 +846,8 @@ error_key_type:
error_sock:
proto_unregister(&rxrpc_proto);
error_proto:
+ unregister_pernet_subsys(&rxrpc_net_ops);
+error_pernet:
rxrpc_exit_security();
error_security:
destroy_workqueue(rxrpc_workqueue);
@@ -875,14 +868,16 @@ static void __exit af_rxrpc_exit(void)
unregister_key_type(&key_type_rxrpc);
sock_unregister(PF_RXRPC);
proto_unregister(&rxrpc_proto);
- rxrpc_destroy_all_calls();
- rxrpc_destroy_all_connections();
+ unregister_pernet_subsys(&rxrpc_net_ops);
ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
- rxrpc_destroy_all_locals();
- remove_proc_entry("rxrpc_conns", init_net.proc_net);
- remove_proc_entry("rxrpc_calls", init_net.proc_net);
+ /* Make sure the local and peer records pinned by any dying connections
+ * are released.
+ */
+ rcu_barrier();
+ rxrpc_destroy_client_conn_ids();
+
destroy_workqueue(rxrpc_workqueue);
rxrpc_exit_security();
kmem_cache_destroy(rxrpc_call_jar);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7486926e60a8..067dbb3121d0 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -11,6 +11,8 @@
#include <linux/atomic.h>
#include <linux/seqlock.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>
@@ -65,6 +67,37 @@ enum {
};
/*
+ * Per-network namespace data.
+ */
+struct rxrpc_net {
+ struct proc_dir_entry *proc_net; /* Subdir in /proc/net */
+ u32 epoch; /* Local epoch for detecting local-end reset */
+ struct list_head calls; /* List of calls active in this namespace */
+ rwlock_t call_lock; /* Lock for ->calls */
+
+ struct list_head conn_proc_list; /* List of conns in this namespace for proc */
+ struct list_head service_conns; /* Service conns in this namespace */
+ rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */
+ struct delayed_work service_conn_reaper;
+
+ unsigned int nr_client_conns;
+ unsigned int nr_active_client_conns;
+ bool kill_all_client_conns;
+ spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
+ spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
+ struct list_head waiting_client_conns;
+ struct list_head active_client_conns;
+ struct list_head idle_client_conns;
+ struct delayed_work client_conn_reaper;
+
+ struct list_head local_endpoints;
+ struct mutex local_mutex; /* Lock for ->local_endpoints */
+
+ spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
+ DECLARE_HASHTABLE (peer_hash, 10);
+};
+
+/*
* Service backlog preallocation.
*
* This contains circular buffers of preallocated peers, connections and calls
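/* Editorial aside -- the per-netns state above is driven by net_ns.c,
 * which this patch adds but which is not shown in this excerpt.  A
 * generic sketch of the pernet pattern it presumably follows (helper
 * names here are hypothetical):
 */
static unsigned int sketch_net_id;

static int __net_init sketch_net_init(struct net *net)
{
	struct rxrpc_net *rxnet = net_generic(net, sketch_net_id);

	/* initialise the namespace's lists, locks and delayed work */
	INIT_LIST_HEAD(&rxnet->calls);
	rwlock_init(&rxnet->call_lock);
	return 0;
}

static void __net_exit sketch_net_exit(struct net *net)
{
	struct rxrpc_net *rxnet = net_generic(net, sketch_net_id);

	rxrpc_destroy_all_calls(rxnet);
}

static struct pernet_operations sketch_net_ops = {
	.init	= sketch_net_init,
	.exit	= sketch_net_exit,
	.id	= &sketch_net_id,
	.size	= sizeof(struct rxrpc_net),
};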
@@ -211,6 +244,7 @@ struct rxrpc_security {
struct rxrpc_local {
struct rcu_head rcu;
atomic_t usage;
+ struct rxrpc_net *rxnet; /* The network ns in which this resides */
struct list_head link;
struct socket *socket; /* my UDP socket */
struct work_struct processor;
@@ -601,7 +635,6 @@ struct rxrpc_ack_summary {
* af_rxrpc.c
*/
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
-extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;
@@ -634,8 +667,6 @@ extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
-extern struct list_head rxrpc_calls;
-extern rwlock_t rxrpc_call_lock;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(gfp_t);
@@ -653,7 +684,7 @@ void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
-void __exit rxrpc_destroy_all_calls(void);
+void rxrpc_destroy_all_calls(struct rxrpc_net *);
static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
@@ -773,7 +804,8 @@ int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
-void __exit rxrpc_destroy_all_client_connections(void);
+void rxrpc_discard_expired_client_conns(struct work_struct *);
+void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
/*
* conn_event.c
@@ -784,9 +816,6 @@ void rxrpc_process_connection(struct work_struct *);
* conn_object.c
*/
extern unsigned int rxrpc_connection_expiry;
-extern struct list_head rxrpc_connections;
-extern struct list_head rxrpc_connection_proc_list;
-extern rwlock_t rxrpc_connection_lock;
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
@@ -800,7 +829,8 @@ void rxrpc_see_connection(struct rxrpc_connection *);
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
-void __exit rxrpc_destroy_all_connections(void);
+void rxrpc_service_connection_reaper(struct work_struct *);
+void rxrpc_destroy_all_connections(struct rxrpc_net *);
static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
@@ -828,7 +858,7 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
*/
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
struct sk_buff *);
-struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
+struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
@@ -861,9 +891,9 @@ extern void rxrpc_process_local_events(struct rxrpc_local *);
/*
* local_object.c
*/
-struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
+struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
-void __exit rxrpc_destroy_all_locals(void);
+void rxrpc_destroy_all_locals(struct rxrpc_net *);
static inline void rxrpc_get_local(struct rxrpc_local *local)
{
@@ -902,6 +932,17 @@ extern unsigned int rxrpc_resend_timeout;
extern const s8 rxrpc_ack_priority[];
/*
+ * net_ns.c
+ */
+extern unsigned int rxrpc_net_id;
+extern struct pernet_operations rxrpc_net_ops;
+
+static inline struct rxrpc_net *rxrpc_net(struct net *net)
+{
+ return net_generic(net, rxrpc_net_id);
+}
+
+/*
* output.c
*/
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 1752fcf8e8f1..a8515b0d4717 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -38,6 +38,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
{
const void *here = __builtin_return_address(0);
struct rxrpc_call *call;
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
int max, tmp;
unsigned int size = RXRPC_BACKLOG_MAX;
unsigned int head, tail, call_head, call_tail;
@@ -79,7 +80,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
if (CIRC_CNT(head, tail, size) < max) {
struct rxrpc_connection *conn;
- conn = rxrpc_prealloc_service_connection(gfp);
+ conn = rxrpc_prealloc_service_connection(rxnet, gfp);
if (!conn)
return -ENOMEM;
b->conn_backlog[head] = conn;
@@ -136,9 +137,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
write_unlock(&rx->call_lock);
- write_lock(&rxrpc_call_lock);
- list_add_tail(&call->link, &rxrpc_calls);
- write_unlock(&rxrpc_call_lock);
+ write_lock(&rxnet->call_lock);
+ list_add_tail(&call->link, &rxnet->calls);
+ write_unlock(&rxnet->call_lock);
b->call_backlog[call_head] = call;
smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
@@ -185,6 +186,7 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
struct rxrpc_backlog *b = rx->backlog;
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
unsigned int size = RXRPC_BACKLOG_MAX, head, tail;
if (!b)
@@ -209,10 +211,10 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
tail = b->conn_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_connection *conn = b->conn_backlog[tail];
- write_lock(&rxrpc_connection_lock);
+ write_lock(&rxnet->conn_lock);
list_del(&conn->link);
list_del(&conn->proc_link);
- write_unlock(&rxrpc_connection_lock);
+ write_unlock(&rxnet->conn_lock);
kfree(conn);
tail = (tail + 1) & (size - 1);
}
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 47f7f4205653..692110808baa 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -44,8 +44,6 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
};
struct kmem_cache *rxrpc_call_jar;
-LIST_HEAD(rxrpc_calls);
-DEFINE_RWLOCK(rxrpc_call_lock);
static void rxrpc_call_timer_expired(unsigned long _call)
{
@@ -207,6 +205,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call, *xcall;
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
struct rb_node *parent, **pp;
const void *here = __builtin_return_address(0);
int ret;
@@ -255,9 +254,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
write_unlock(&rx->call_lock);
- write_lock(&rxrpc_call_lock);
- list_add_tail(&call->link, &rxrpc_calls);
- write_unlock(&rxrpc_call_lock);
+ write_lock(&rxnet->call_lock);
+ list_add_tail(&call->link, &rxnet->calls);
+ write_unlock(&rxnet->call_lock);
/* From this point on, the call is protected by its own lock. */
release_sock(&rx->sk);
@@ -508,6 +507,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
*/
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
+ struct rxrpc_net *rxnet;
const void *here = __builtin_return_address(0);
int n;
@@ -520,9 +520,12 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
_debug("call %d dead", call->debug_id);
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
- write_lock(&rxrpc_call_lock);
- list_del_init(&call->link);
- write_unlock(&rxrpc_call_lock);
+ if (!list_empty(&call->link)) {
+ rxnet = rxrpc_net(sock_net(&call->socket->sk));
+ write_lock(&rxnet->call_lock);
+ list_del_init(&call->link);
+ write_unlock(&rxnet->call_lock);
+ }
rxrpc_cleanup_call(call);
}
@@ -570,21 +573,23 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
}
/*
- * Make sure that all calls are gone.
+ * Make sure that all calls are gone from a network namespace. To reach this
+ * point, any open UDP sockets in that namespace must have been closed, so any
+ * outstanding calls cannot be doing I/O.
*/
-void __exit rxrpc_destroy_all_calls(void)
+void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
struct rxrpc_call *call;
_enter("");
- if (list_empty(&rxrpc_calls))
+ if (list_empty(&rxnet->calls))
return;
- write_lock(&rxrpc_call_lock);
+ write_lock(&rxnet->call_lock);
- while (!list_empty(&rxrpc_calls)) {
- call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
+ while (!list_empty(&rxnet->calls)) {
+ call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
_debug("Zapping call %p", call);
rxrpc_see_call(call);
@@ -595,10 +600,10 @@ void __exit rxrpc_destroy_all_calls(void)
rxrpc_call_states[call->state],
call->flags, call->events);
- write_unlock(&rxrpc_call_lock);
+ write_unlock(&rxnet->call_lock);
cond_resched();
- write_lock(&rxrpc_call_lock);
+ write_lock(&rxnet->call_lock);
}
- write_unlock(&rxrpc_call_lock);
+ write_unlock(&rxnet->call_lock);
}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index e8dea0d49e7f..c86f3202f967 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -31,7 +31,7 @@
* may freely grant available channels to new calls and calls may be
* waiting on it for channels to become available.
*
- * The connection is on the rxrpc_active_client_conns list which is kept
+ * The connection is on the rxnet->active_client_conns list which is kept
* in activation order for culling purposes.
*
* rxrpc_nr_active_client_conns is held incremented also.
@@ -46,7 +46,7 @@
* expires, the EXPOSED flag is cleared and the connection transitions to
* the INACTIVE state.
*
- * The connection is on the rxrpc_idle_client_conns list which is kept in
+ * The connection is on the rxnet->idle_client_conns list which is kept in
* order of how soon they'll expire.
*
* There are flags of relevance to the cache:
@@ -85,27 +85,13 @@ __read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
-static unsigned int rxrpc_nr_client_conns;
-static unsigned int rxrpc_nr_active_client_conns;
-static __read_mostly bool rxrpc_kill_all_client_conns;
-
-static DEFINE_SPINLOCK(rxrpc_client_conn_cache_lock);
-static DEFINE_SPINLOCK(rxrpc_client_conn_discard_mutex);
-static LIST_HEAD(rxrpc_waiting_client_conns);
-static LIST_HEAD(rxrpc_active_client_conns);
-static LIST_HEAD(rxrpc_idle_client_conns);
-
/*
* We use machine-unique IDs for our client connections.
*/
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
-static void rxrpc_cull_active_client_conns(void);
-static void rxrpc_discard_expired_client_conns(struct work_struct *);
-
-static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
- rxrpc_discard_expired_client_conns);
+static void rxrpc_cull_active_client_conns(struct rxrpc_net *);
/*
* Get a connection ID and epoch for a client connection from the global pool.
@@ -116,6 +102,7 @@ static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
gfp_t gfp)
{
+ struct rxrpc_net *rxnet = conn->params.local->rxnet;
int id;
_enter("");
@@ -131,7 +118,7 @@ static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
spin_unlock(&rxrpc_conn_id_lock);
idr_preload_end();
- conn->proto.epoch = rxrpc_epoch;
+ conn->proto.epoch = rxnet->epoch;
conn->proto.cid = id << RXRPC_CIDSHIFT;
set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
_leave(" [CID %x]", conn->proto.cid);
@@ -183,6 +170,7 @@ static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
struct rxrpc_connection *conn;
+ struct rxrpc_net *rxnet = cp->local->rxnet;
int ret;
_enter("");
@@ -213,9 +201,9 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
if (ret < 0)
goto error_2;
- write_lock(&rxrpc_connection_lock);
- list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
- write_unlock(&rxrpc_connection_lock);
+ write_lock(&rxnet->conn_lock);
+ list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
+ write_unlock(&rxnet->conn_lock);
/* We steal the caller's peer ref. */
cp->peer = NULL;
@@ -243,12 +231,13 @@ error_0:
*/
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
+ struct rxrpc_net *rxnet = conn->params.local->rxnet;
int id_cursor, id, distance, limit;
if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
goto dont_reuse;
- if (conn->proto.epoch != rxrpc_epoch)
+ if (conn->proto.epoch != rxnet->epoch)
goto mark_dont_reuse;
/* The IDR tree gets very expensive on memory if the connection IDs are
@@ -440,12 +429,13 @@ error:
/*
* Activate a connection.
*/
-static void rxrpc_activate_conn(struct rxrpc_connection *conn)
+static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
+ struct rxrpc_connection *conn)
{
trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
- rxrpc_nr_active_client_conns++;
- list_move_tail(&conn->cache_link, &rxrpc_active_client_conns);
+ rxnet->nr_active_client_conns++;
+ list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}
/*
@@ -460,7 +450,8 @@ static void rxrpc_activate_conn(struct rxrpc_connection *conn)
* channels if it has been culled to make space and then re-requested by a new
* call.
*/
-static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
+static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
+ struct rxrpc_connection *conn)
{
unsigned int nr_conns;
@@ -469,12 +460,12 @@ static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE)
goto out;
- spin_lock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
- nr_conns = rxrpc_nr_client_conns;
+ nr_conns = rxnet->nr_client_conns;
if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
trace_rxrpc_client(conn, -1, rxrpc_client_count);
- rxrpc_nr_client_conns = nr_conns + 1;
+ rxnet->nr_client_conns = nr_conns + 1;
}
switch (conn->cache_state) {
@@ -494,21 +485,21 @@ static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
}
out_unlock:
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_cache_lock);
out:
_leave(" [%d]", conn->cache_state);
return;
activate_conn:
_debug("activate");
- rxrpc_activate_conn(conn);
+ rxrpc_activate_conn(rxnet, conn);
goto out_unlock;
wait_for_capacity:
_debug("wait");
trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
- list_move_tail(&conn->cache_link, &rxrpc_waiting_client_conns);
+ list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
goto out_unlock;
}
@@ -660,18 +651,19 @@ int rxrpc_connect_call(struct rxrpc_call *call,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
+ struct rxrpc_net *rxnet = cp->local->rxnet;
int ret;
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
- rxrpc_discard_expired_client_conns(NULL);
- rxrpc_cull_active_client_conns();
+ rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+ rxrpc_cull_active_client_conns(rxnet);
ret = rxrpc_get_client_conn(call, cp, srx, gfp);
if (ret < 0)
return ret;
- rxrpc_animate_client_conn(call->conn);
+ rxrpc_animate_client_conn(rxnet, call->conn);
rxrpc_activate_channels(call->conn);
ret = rxrpc_wait_for_channel(call, gfp);
@@ -729,6 +721,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
unsigned int channel = call->cid & RXRPC_CHANNELMASK;
struct rxrpc_connection *conn = call->conn;
struct rxrpc_channel *chan = &conn->channels[channel];
+ struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));
trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
call->conn = NULL;
@@ -750,7 +743,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
/* We must deactivate or idle the connection if it's now
* waiting for nothing.
*/
- spin_lock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
list_empty(&conn->waiting_calls) &&
!conn->active_chans)
@@ -787,14 +780,14 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
* list. It might even get moved back to the active list whilst we're
* waiting for the lock.
*/
- spin_lock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
switch (conn->cache_state) {
case RXRPC_CONN_CLIENT_ACTIVE:
if (list_empty(&conn->waiting_calls)) {
rxrpc_deactivate_one_channel(conn, channel);
if (!conn->active_chans) {
- rxrpc_nr_active_client_conns--;
+ rxnet->nr_active_client_conns--;
goto idle_connection;
}
goto out;
@@ -820,7 +813,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
}
out:
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
spin_unlock(&conn->channel_lock);
rxrpc_put_connection(conn);
@@ -835,11 +828,11 @@ idle_connection:
trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
conn->idle_timestamp = jiffies;
conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
- list_move_tail(&conn->cache_link, &rxrpc_idle_client_conns);
- if (rxrpc_idle_client_conns.next == &conn->cache_link &&
- !rxrpc_kill_all_client_conns)
+ list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
+ if (rxnet->idle_client_conns.next == &conn->cache_link &&
+ !rxnet->kill_all_client_conns)
queue_delayed_work(rxrpc_workqueue,
- &rxrpc_client_conn_reap,
+ &rxnet->client_conn_reaper,
rxrpc_conn_idle_client_expiry);
} else {
trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
@@ -857,6 +850,7 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
struct rxrpc_connection *next = NULL;
struct rxrpc_local *local = conn->params.local;
+ struct rxrpc_net *rxnet = local->rxnet;
unsigned int nr_conns;
trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
@@ -875,18 +869,18 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
- spin_lock(&rxrpc_client_conn_cache_lock);
- nr_conns = --rxrpc_nr_client_conns;
+ spin_lock(&rxnet->client_conn_cache_lock);
+ nr_conns = --rxnet->nr_client_conns;
if (nr_conns < rxrpc_max_client_connections &&
- !list_empty(&rxrpc_waiting_client_conns)) {
- next = list_entry(rxrpc_waiting_client_conns.next,
+ !list_empty(&rxnet->waiting_client_conns)) {
+ next = list_entry(rxnet->waiting_client_conns.next,
struct rxrpc_connection, cache_link);
rxrpc_get_connection(next);
- rxrpc_activate_conn(next);
+ rxrpc_activate_conn(rxnet, next);
}
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_cache_lock);
}
rxrpc_kill_connection(conn);
@@ -921,10 +915,10 @@ void rxrpc_put_client_conn(struct rxrpc_connection *conn)
/*
* Kill the longest-active client connections to make room for new ones.
*/
-static void rxrpc_cull_active_client_conns(void)
+static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
struct rxrpc_connection *conn;
- unsigned int nr_conns = rxrpc_nr_client_conns;
+ unsigned int nr_conns = rxnet->nr_client_conns;
unsigned int nr_active, limit;
_enter("");
@@ -936,12 +930,12 @@ static void rxrpc_cull_active_client_conns(void)
}
limit = rxrpc_reap_client_connections;
- spin_lock(&rxrpc_client_conn_cache_lock);
- nr_active = rxrpc_nr_active_client_conns;
+ spin_lock(&rxnet->client_conn_cache_lock);
+ nr_active = rxnet->nr_active_client_conns;
while (nr_active > limit) {
- ASSERT(!list_empty(&rxrpc_active_client_conns));
- conn = list_entry(rxrpc_active_client_conns.next,
+ ASSERT(!list_empty(&rxnet->active_client_conns));
+ conn = list_entry(rxnet->active_client_conns.next,
struct rxrpc_connection, cache_link);
ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE);
@@ -953,14 +947,14 @@ static void rxrpc_cull_active_client_conns(void)
trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
list_move_tail(&conn->cache_link,
- &rxrpc_waiting_client_conns);
+ &rxnet->waiting_client_conns);
}
nr_active--;
}
- rxrpc_nr_active_client_conns = nr_active;
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ rxnet->nr_active_client_conns = nr_active;
+ spin_unlock(&rxnet->client_conn_cache_lock);
ASSERTCMP(nr_active, >=, 0);
_leave(" [culled]");
}
@@ -972,22 +966,25 @@ static void rxrpc_cull_active_client_conns(void)
* This may be called from conn setup or from a work item so cannot be
* considered non-reentrant.
*/
-static void rxrpc_discard_expired_client_conns(struct work_struct *work)
+void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
struct rxrpc_connection *conn;
+ struct rxrpc_net *rxnet =
+ container_of(to_delayed_work(work),
+ struct rxrpc_net, client_conn_reaper);
unsigned long expiry, conn_expires_at, now;
unsigned int nr_conns;
bool did_discard = false;
- _enter("%c", work ? 'w' : 'n');
+ _enter("");
- if (list_empty(&rxrpc_idle_client_conns)) {
+ if (list_empty(&rxnet->idle_client_conns)) {
_leave(" [empty]");
return;
}
/* Don't double up on the discarding */
- if (!spin_trylock(&rxrpc_client_conn_discard_mutex)) {
+ if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
_leave(" [already]");
return;
}
@@ -995,19 +992,19 @@ static void rxrpc_discard_expired_client_conns(struct work_struct *work)
/* We keep an estimate of what the number of conns ought to be after
* we've discarded some so that we don't overdo the discarding.
*/
- nr_conns = rxrpc_nr_client_conns;
+ nr_conns = rxnet->nr_client_conns;
next:
- spin_lock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
- if (list_empty(&rxrpc_idle_client_conns))
+ if (list_empty(&rxnet->idle_client_conns))
goto out;
- conn = list_entry(rxrpc_idle_client_conns.next,
+ conn = list_entry(rxnet->idle_client_conns.next,
struct rxrpc_connection, cache_link);
ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));
- if (!rxrpc_kill_all_client_conns) {
+ if (!rxnet->kill_all_client_conns) {
/* If the number of connections is over the reap limit, we
* expedite discard by reducing the expiry timeout. We must,
* however, have at least a short grace period to be able to do
@@ -1030,7 +1027,7 @@ next:
conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
list_del_init(&conn->cache_link);
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_cache_lock);
/* When we cleared the EXPOSED flag, we took on responsibility for the
* reference that the flag had on the usage count. We deal with that here.
@@ -1050,14 +1047,14 @@ not_yet_expired:
* then things get messier.
*/
_debug("not yet");
- if (!rxrpc_kill_all_client_conns)
+ if (!rxnet->kill_all_client_conns)
queue_delayed_work(rxrpc_workqueue,
- &rxrpc_client_conn_reap,
+ &rxnet->client_conn_reaper,
conn_expires_at - now);
out:
- spin_unlock(&rxrpc_client_conn_cache_lock);
- spin_unlock(&rxrpc_client_conn_discard_mutex);
+ spin_unlock(&rxnet->client_conn_cache_lock);
+ spin_unlock(&rxnet->client_conn_discard_lock);
_leave("");
}
@@ -1065,17 +1062,17 @@ out:
* Preemptively destroy all the client connection records rather than waiting
* for them to time out
*/
-void __exit rxrpc_destroy_all_client_connections(void)
+void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
_enter("");
- spin_lock(&rxrpc_client_conn_cache_lock);
- rxrpc_kill_all_client_conns = true;
- spin_unlock(&rxrpc_client_conn_cache_lock);
+ spin_lock(&rxnet->client_conn_cache_lock);
+ rxnet->kill_all_client_conns = true;
+ spin_unlock(&rxnet->client_conn_cache_lock);
- cancel_delayed_work(&rxrpc_client_conn_reap);
+ cancel_delayed_work(&rxnet->client_conn_reaper);
- if (!queue_delayed_work(rxrpc_workqueue, &rxrpc_client_conn_reap, 0))
+ if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
_debug("destroy: queue failed");
_leave("");
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index b0ecb770fdce..ade4d3d0b2a7 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -22,13 +22,6 @@
*/
unsigned int rxrpc_connection_expiry = 10 * 60;
-static void rxrpc_connection_reaper(struct work_struct *work);
-
-LIST_HEAD(rxrpc_connections);
-LIST_HEAD(rxrpc_connection_proc_list);
-DEFINE_RWLOCK(rxrpc_connection_lock);
-static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
-
static void rxrpc_destroy_connection(struct rcu_head *);
/*
@@ -222,15 +215,17 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
*/
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
+ struct rxrpc_net *rxnet = conn->params.local->rxnet;
+
ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
!rcu_access_pointer(conn->channels[1].call) &&
!rcu_access_pointer(conn->channels[2].call) &&
!rcu_access_pointer(conn->channels[3].call));
ASSERT(list_empty(&conn->cache_link));
- write_lock(&rxrpc_connection_lock);
+ write_lock(&rxnet->conn_lock);
list_del_init(&conn->proc_link);
- write_unlock(&rxrpc_connection_lock);
+ write_unlock(&rxnet->conn_lock);
/* Drain the Rx queue. Note that even though we've unpublished, an
* incoming packet could still be being added to our Rx queue, so we
@@ -309,14 +304,17 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
*/
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
+ struct rxrpc_net *rxnet;
const void *here = __builtin_return_address(0);
int n;
n = atomic_dec_return(&conn->usage);
trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
ASSERTCMP(n, >=, 0);
- if (n == 0)
- rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+ if (n == 0) {
+ rxnet = conn->params.local->rxnet;
+ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
+ }
}
/*
@@ -348,9 +346,12 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
/*
* reap dead service connections
*/
-static void rxrpc_connection_reaper(struct work_struct *work)
+void rxrpc_service_connection_reaper(struct work_struct *work)
{
struct rxrpc_connection *conn, *_p;
+ struct rxrpc_net *rxnet =
+ container_of(to_delayed_work(work),
+ struct rxrpc_net, service_conn_reaper);
unsigned long reap_older_than, earliest, idle_timestamp, now;
LIST_HEAD(graveyard);
@@ -361,8 +362,8 @@ static void rxrpc_connection_reaper(struct work_struct *work)
reap_older_than = now - rxrpc_connection_expiry * HZ;
earliest = ULONG_MAX;
- write_lock(&rxrpc_connection_lock);
- list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+ write_lock(&rxnet->conn_lock);
+ list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
ASSERTCMP(atomic_read(&conn->usage), >, 0);
if (likely(atomic_read(&conn->usage) > 1))
continue;
@@ -393,12 +394,12 @@ static void rxrpc_connection_reaper(struct work_struct *work)
list_move_tail(&conn->link, &graveyard);
}
- write_unlock(&rxrpc_connection_lock);
+ write_unlock(&rxnet->conn_lock);
if (earliest != ULONG_MAX) {
_debug("reschedule reaper %ld", (long) earliest - now);
ASSERT(time_after(earliest, now));
- rxrpc_queue_delayed_work(&rxrpc_connection_reap,
+ rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
earliest - now);
}
@@ -418,36 +419,30 @@ static void rxrpc_connection_reaper(struct work_struct *work)
* preemptively destroy all the service connection records rather than
* waiting for them to time out
*/
-void __exit rxrpc_destroy_all_connections(void)
+void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
struct rxrpc_connection *conn, *_p;
bool leak = false;
_enter("");
- rxrpc_destroy_all_client_connections();
+ rxrpc_destroy_all_client_connections(rxnet);
rxrpc_connection_expiry = 0;
- cancel_delayed_work(&rxrpc_connection_reap);
- rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+ cancel_delayed_work(&rxnet->service_conn_reaper);
+ rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
flush_workqueue(rxrpc_workqueue);
- write_lock(&rxrpc_connection_lock);
- list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+ write_lock(&rxnet->conn_lock);
+ list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
conn, atomic_read(&conn->usage));
leak = true;
}
- write_unlock(&rxrpc_connection_lock);
+ write_unlock(&rxnet->conn_lock);
BUG_ON(leak);
- ASSERT(list_empty(&rxrpc_connection_proc_list));
-
- /* Make sure the local and peer records pinned by any dying connections
- * are released.
- */
- rcu_barrier();
- rxrpc_destroy_client_conn_ids();
+ ASSERT(list_empty(&rxnet->conn_proc_list));
_leave("");
}
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index eef551f40dc2..edfc633f7d5e 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -121,7 +121,8 @@ replace_old_connection:
* Preallocate a service connection. The connection is placed on the proc and
* reap lists so that we don't have to get the lock from BH context.
*/
-struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp)
+struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
+ gfp_t gfp)
{
struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
@@ -132,10 +133,10 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp)
conn->state = RXRPC_CONN_SERVICE_PREALLOC;
atomic_set(&conn->usage, 2);
- write_lock(&rxrpc_connection_lock);
- list_add_tail(&conn->link, &rxrpc_connections);
- list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
- write_unlock(&rxrpc_connection_lock);
+ write_lock(&rxnet->conn_lock);
+ list_add_tail(&conn->link, &rxnet->service_conns);
+ list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
+ write_unlock(&rxnet->conn_lock);
trace_rxrpc_conn(conn, rxrpc_conn_new_service,
atomic_read(&conn->usage),
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index ff4864d550b8..17d79fd73ade 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -25,9 +25,6 @@
static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);
-static DEFINE_MUTEX(rxrpc_local_mutex);
-static LIST_HEAD(rxrpc_local_endpoints);
-
/*
* Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
* same or greater than.
@@ -77,13 +74,15 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
/*
* Allocate a new local endpoint.
*/
-static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
+static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
+ const struct sockaddr_rxrpc *srx)
{
struct rxrpc_local *local;
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
if (local) {
atomic_set(&local->usage, 1);
+ local->rxnet = rxnet;
INIT_LIST_HEAD(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
init_rwsem(&local->defrag_sem);
@@ -105,7 +104,7 @@ static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
* create the local socket
* - must be called with rxnet->local_mutex locked
*/
-static int rxrpc_open_socket(struct rxrpc_local *local)
+static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
struct sock *sock;
int ret, opt;
@@ -114,7 +113,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local)
local, local->srx.transport_type, local->srx.transport.family);
/* create a socket to represent the local endpoint */
- ret = sock_create_kern(&init_net, local->srx.transport.family,
+ ret = sock_create_kern(net, local->srx.transport.family,
local->srx.transport_type, 0, &local->socket);
if (ret < 0) {
_leave(" = %d [socket]", ret);
@@ -172,9 +171,11 @@ error:
/*
* Look up or create a new local endpoint using the specified local address.
*/
-struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
+struct rxrpc_local *rxrpc_lookup_local(struct net *net,
+ const struct sockaddr_rxrpc *srx)
{
struct rxrpc_local *local;
+ struct rxrpc_net *rxnet = rxrpc_net(net);
struct list_head *cursor;
const char *age;
long diff;
@@ -183,10 +184,10 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
_enter("{%d,%d,%pISp}",
srx->transport_type, srx->transport.family, &srx->transport);
- mutex_lock(&rxrpc_local_mutex);
+ mutex_lock(&rxnet->local_mutex);
- for (cursor = rxrpc_local_endpoints.next;
- cursor != &rxrpc_local_endpoints;
+ for (cursor = rxnet->local_endpoints.next;
+ cursor != &rxnet->local_endpoints;
cursor = cursor->next) {
local = list_entry(cursor, struct rxrpc_local, link);
@@ -220,11 +221,11 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
goto found;
}
- local = rxrpc_alloc_local(srx);
+ local = rxrpc_alloc_local(rxnet, srx);
if (!local)
goto nomem;
- ret = rxrpc_open_socket(local);
+ ret = rxrpc_open_socket(local, net);
if (ret < 0)
goto sock_error;
@@ -232,7 +233,7 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
age = "new";
found:
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
_net("LOCAL %s %d {%pISp}",
age, local->debug_id, &local->srx.transport);
@@ -243,13 +244,13 @@ found:
nomem:
ret = -ENOMEM;
sock_error:
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
kfree(local);
_leave(" = %d", ret);
return ERR_PTR(ret);
addr_in_use:
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
_leave(" = -EADDRINUSE");
return ERR_PTR(-EADDRINUSE);
}
@@ -273,6 +274,7 @@ void __rxrpc_put_local(struct rxrpc_local *local)
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
struct socket *socket = local->socket;
+ struct rxrpc_net *rxnet = local->rxnet;
_enter("%d", local->debug_id);
@@ -286,9 +288,9 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
}
local->dead = true;
- mutex_lock(&rxrpc_local_mutex);
+ mutex_lock(&rxnet->local_mutex);
list_del_init(&local->link);
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
ASSERT(RB_EMPTY_ROOT(&local->client_conns));
ASSERT(!local->service);
@@ -357,7 +359,7 @@ static void rxrpc_local_rcu(struct rcu_head *rcu)
/*
* Verify the local endpoint list is empty by this point.
*/
-void __exit rxrpc_destroy_all_locals(void)
+void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
struct rxrpc_local *local;
@@ -365,15 +367,13 @@ void __exit rxrpc_destroy_all_locals(void)
flush_workqueue(rxrpc_workqueue);
- if (!list_empty(&rxrpc_local_endpoints)) {
- mutex_lock(&rxrpc_local_mutex);
- list_for_each_entry(local, &rxrpc_local_endpoints, link) {
+ if (!list_empty(&rxnet->local_endpoints)) {
+ mutex_lock(&rxnet->local_mutex);
+ list_for_each_entry(local, &rxnet->local_endpoints, link) {
pr_err("AF_RXRPC: Leaked local %p {%d}\n",
local, atomic_read(&local->usage));
}
- mutex_unlock(&rxrpc_local_mutex);
+ mutex_unlock(&rxnet->local_mutex);
BUG();
}
-
- rcu_barrier();
}
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
new file mode 100644
index 000000000000..26449a6bb076
--- /dev/null
+++ b/net/rxrpc/net_ns.c
@@ -0,0 +1,85 @@
+/* rxrpc network namespace handling.
+ *
+ * Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/proc_fs.h>
+#include "ar-internal.h"
+
+unsigned int rxrpc_net_id;
+
+/*
+ * Initialise a per-network namespace record.
+ */
+static __net_init int rxrpc_init_net(struct net *net)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(net);
+ int ret;
+
+ get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
+ rxnet->epoch |= RXRPC_RANDOM_EPOCH;
+
+ INIT_LIST_HEAD(&rxnet->calls);
+ rwlock_init(&rxnet->call_lock);
+
+ INIT_LIST_HEAD(&rxnet->conn_proc_list);
+ INIT_LIST_HEAD(&rxnet->service_conns);
+ rwlock_init(&rxnet->conn_lock);
+ INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
+ rxrpc_service_connection_reaper);
+
+ rxnet->nr_client_conns = 0;
+ rxnet->nr_active_client_conns = 0;
+ rxnet->kill_all_client_conns = false;
+ spin_lock_init(&rxnet->client_conn_cache_lock);
+ spin_lock_init(&rxnet->client_conn_discard_lock);
+ INIT_LIST_HEAD(&rxnet->waiting_client_conns);
+ INIT_LIST_HEAD(&rxnet->active_client_conns);
+ INIT_LIST_HEAD(&rxnet->idle_client_conns);
+ INIT_DELAYED_WORK(&rxnet->client_conn_reaper,
+ rxrpc_discard_expired_client_conns);
+
+ INIT_LIST_HEAD(&rxnet->local_endpoints);
+ mutex_init(&rxnet->local_mutex);
+ hash_init(rxnet->peer_hash);
+ spin_lock_init(&rxnet->peer_hash_lock);
+
+ ret = -ENOMEM;
+ rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
+ if (!rxnet->proc_net)
+ goto err_proc;
+
+ proc_create("calls", 0444, rxnet->proc_net, &rxrpc_call_seq_fops);
+ proc_create("conns", 0444, rxnet->proc_net, &rxrpc_connection_seq_fops);
+ return 0;
+
+err_proc:
+ return ret;
+}
+
+/*
+ * Clean up a per-network namespace record.
+ */
+static __net_exit void rxrpc_exit_net(struct net *net)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(net);
+
+ rxrpc_destroy_all_calls(rxnet);
+ rxrpc_destroy_all_connections(rxnet);
+ rxrpc_destroy_all_locals(rxnet);
+ proc_remove(rxnet->proc_net);
+}
+
+struct pernet_operations rxrpc_net_ops = {
+ .init = rxrpc_init_net,
+ .exit = rxrpc_exit_net,
+ .id = &rxrpc_net_id,
+ .size = sizeof(struct rxrpc_net),
+};
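
rxrpc_net() is not defined in this hunk; given the .id/.size pair in
rxrpc_net_ops, it is presumably the usual one-line wrapper around
net_generic(), which returns the per-namespace area the core allocated for
this module. A hedged sketch of that accessor (the real definition lives in
ar-internal.h):

	#include <net/netns/generic.h>

	/* pernet_operations.size tells the core how much storage to
	 * allocate per network namespace; .id records the slot under
	 * which net_generic() finds it again.
	 */
	static inline struct rxrpc_net *rxrpc_net(struct net *net)
	{
		return net_generic(net, rxrpc_net_id);
	}
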
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 862eea6b266c..cfed3b27adf0 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -26,9 +26,6 @@
#include <net/ip6_route.h>
#include "ar-internal.h"
-static DEFINE_HASHTABLE(rxrpc_peer_hash, 10);
-static DEFINE_SPINLOCK(rxrpc_peer_hash_lock);
-
/*
* Hash a peer key.
*/
@@ -124,8 +121,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
unsigned long hash_key)
{
struct rxrpc_peer *peer;
+ struct rxrpc_net *rxnet = local->rxnet;
- hash_for_each_possible_rcu(rxrpc_peer_hash, peer, hash_link, hash_key) {
+ hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
if (atomic_read(&peer->usage) == 0)
return NULL;
@@ -301,13 +299,14 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
struct rxrpc_peer *prealloc)
{
struct rxrpc_peer *peer;
+ struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;
hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
prealloc->local = local;
rxrpc_init_peer(prealloc, hash_key);
- spin_lock(&rxrpc_peer_hash_lock);
+ spin_lock(&rxnet->peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
@@ -315,10 +314,10 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
peer = NULL;
if (!peer) {
peer = prealloc;
- hash_add_rcu(rxrpc_peer_hash, &peer->hash_link, hash_key);
+ hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
}
- spin_unlock(&rxrpc_peer_hash_lock);
+ spin_unlock(&rxnet->peer_hash_lock);
return peer;
}
@@ -329,6 +328,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp)
{
struct rxrpc_peer *peer, *candidate;
+ struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
_enter("{%pISp}", &srx->transport);
@@ -350,17 +350,17 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
return NULL;
}
- spin_lock_bh(&rxrpc_peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer && !rxrpc_get_peer_maybe(peer))
peer = NULL;
if (!peer)
- hash_add_rcu(rxrpc_peer_hash,
+ hash_add_rcu(rxnet->peer_hash,
&candidate->hash_link, hash_key);
- spin_unlock_bh(&rxrpc_peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
if (peer)
kfree(candidate);
@@ -379,11 +379,13 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
*/
void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
+ struct rxrpc_net *rxnet = peer->local->rxnet;
+
ASSERT(hlist_empty(&peer->error_targets));
- spin_lock_bh(&rxrpc_peer_hash_lock);
+ spin_lock_bh(&rxnet->peer_hash_lock);
hash_del_rcu(&peer->hash_link);
- spin_unlock_bh(&rxrpc_peer_hash_lock);
+ spin_unlock_bh(&rxnet->peer_hash_lock);
kfree_rcu(peer, rcu);
}
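
The peer-table change is the stock recipe for de-globalising a hashtable:
the file-scope DEFINE_HASHTABLE() becomes a DECLARE_HASHTABLE() member of
struct rxrpc_net, and the static initialisation moves to hash_init() in
rxrpc_init_net() (shown earlier). A sketch of the resulting shape, assuming
the member keeps the original 2^10 buckets:

	#include <linux/hashtable.h>

	struct rxrpc_net_sketch {			/* illustrative only */
		DECLARE_HASHTABLE(peer_hash, 10);	/* 1024 buckets */
		spinlock_t peer_hash_lock;
	};

	static void sketch_init(struct rxrpc_net_sketch *rxnet)
	{
		hash_init(rxnet->peer_hash);	/* zero every bucket head */
		spin_lock_init(&rxnet->peer_hash_lock);
	}

Lookups then take the table from local->rxnet, as __rxrpc_lookup_peer_rcu()
does above, instead of naming a global.
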
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index b9bcfbfb095c..e92d8405b15a 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -30,19 +30,25 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
*/
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
rcu_read_lock();
- read_lock(&rxrpc_call_lock);
- return seq_list_start_head(&rxrpc_calls, *_pos);
+ read_lock(&rxnet->call_lock);
+ return seq_list_start_head(&rxnet->calls, *_pos);
}
static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- return seq_list_next(v, &rxrpc_calls, pos);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ return seq_list_next(v, &rxnet->calls, pos);
}
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
{
- read_unlock(&rxrpc_call_lock);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ read_unlock(&rxnet->call_lock);
rcu_read_unlock();
}
@@ -52,10 +58,11 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_sock *rx;
struct rxrpc_peer *peer;
struct rxrpc_call *call;
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
rxrpc_seq_t tx_hard_ack, rx_hard_ack;
char lbuff[50], rbuff[50];
- if (v == &rxrpc_calls) {
+ if (v == &rxnet->calls) {
seq_puts(seq,
"Proto Local "
" Remote "
@@ -113,7 +120,8 @@ static const struct seq_operations rxrpc_call_seq_ops = {
static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &rxrpc_call_seq_ops);
+ return seq_open_net(inode, file, &rxrpc_call_seq_ops,
+ sizeof(struct seq_net_private));
}
const struct file_operations rxrpc_call_seq_fops = {
@@ -129,27 +137,34 @@ const struct file_operations rxrpc_call_seq_fops = {
*/
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
{
- read_lock(&rxrpc_connection_lock);
- return seq_list_start_head(&rxrpc_connection_proc_list, *_pos);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ read_lock(&rxnet->conn_lock);
+ return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}
static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
- return seq_list_next(v, &rxrpc_connection_proc_list, pos);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ return seq_list_next(v, &rxnet->conn_proc_list, pos);
}
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
{
- read_unlock(&rxrpc_connection_lock);
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+
+ read_unlock(&rxnet->conn_lock);
}
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
struct rxrpc_connection *conn;
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
char lbuff[50], rbuff[50];
- if (v == &rxrpc_connection_proc_list) {
+ if (v == &rxnet->conn_proc_list) {
seq_puts(seq,
"Proto Local "
" Remote "
@@ -197,7 +212,8 @@ static const struct seq_operations rxrpc_connection_seq_ops = {
static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &rxrpc_connection_seq_ops);
+ return seq_open_net(inode, file, &rxrpc_connection_seq_ops,
+ sizeof(struct seq_net_private));
}
const struct file_operations rxrpc_connection_seq_fops = {
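
The proc conversion above swaps seq_open() for seq_open_net(), which stores
a struct seq_net_private in the seq_file's private area so that
seq_file_net() can later return the namespace captured when the file was
opened. A minimal sketch of the same pattern with hypothetical names
(foo_net, foo_pernet() and foo_seq_ops are stand-ins, not kernel symbols):

	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
	{
		/* namespace of the opener, not of the current task */
		struct foo_net *fn = foo_pernet(seq_file_net(seq));

		read_lock(&fn->lock);
		return seq_list_start_head(&fn->items, *pos);
	}

	static int foo_seq_open(struct inode *inode, struct file *file)
	{
		/* the extra size argument is what makes seq_file_net()
		 * work in the handlers above
		 */
		return seq_open_net(inode, file, &foo_seq_ops,
				    sizeof(struct seq_net_private));
	}
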
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a90e8f355c00..aed6cf2e9fd8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -28,6 +28,31 @@
#include <net/act_api.h>
#include <net/netlink.h>
+static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
+{
+ u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
+
+ if (!tp)
+ return -EINVAL;
+ a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
+ if (!a->goto_chain)
+ return -ENOMEM;
+ return 0;
+}
+
+static void tcf_action_goto_chain_fini(struct tc_action *a)
+{
+ tcf_chain_put(a->goto_chain);
+}
+
+static void tcf_action_goto_chain_exec(const struct tc_action *a,
+ struct tcf_result *res)
+{
+ const struct tcf_chain *chain = a->goto_chain;
+
+ res->goto_tp = rcu_dereference_bh(chain->filter_chain);
+}
+
static void free_tcf(struct rcu_head *head)
{
struct tc_action *p = container_of(head, struct tc_action, tcfa_rcu);
@@ -39,6 +64,8 @@ static void free_tcf(struct rcu_head *head)
kfree(p->act_cookie->data);
kfree(p->act_cookie);
}
+ if (p->goto_chain)
+ tcf_action_goto_chain_fini(p);
kfree(p);
}
@@ -465,6 +492,8 @@ repeat:
else /* faulty graph, stop pipeline */
return TC_ACT_OK;
}
+ } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
+ tcf_action_goto_chain_exec(a, res);
}
if (ret != TC_ACT_PIPE)
@@ -570,9 +599,9 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
return c;
}
-struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *name, int ovr,
- int bind)
+struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind)
{
struct tc_action *a;
struct tc_action_ops *a_o;
@@ -657,6 +686,17 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
if (err != ACT_P_CREATED)
module_put(a_o->owner);
+ if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
+ err = tcf_action_goto_chain_init(a, tp);
+ if (err) {
+ LIST_HEAD(actions);
+
+ list_add_tail(&a->list, &actions);
+ tcf_action_destroy(&actions, bind);
+ return ERR_PTR(err);
+ }
+ }
+
return a;
err_mod:
@@ -680,8 +720,9 @@ static void cleanup_a(struct list_head *actions, int ovr)
a->tcfa_refcnt--;
}
-int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est,
- char *name, int ovr, int bind, struct list_head *actions)
+int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ struct nlattr *est, char *name, int ovr, int bind,
+ struct list_head *actions)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *act;
@@ -693,7 +734,7 @@ int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est,
return err;
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
- act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
+ act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind);
if (IS_ERR(act)) {
err = PTR_ERR(act);
goto err;
@@ -1020,7 +1061,7 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
int ret = 0;
LIST_HEAD(actions);
- ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
+ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions);
if (ret)
return ret;
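
TC_ACT_GOTO_CHAIN is an "extended" action verb: the opcode sits in the high
bits of tcfa_action and the operand (the target chain index) sits in the low
TC_ACT_EXT_VAL_MASK bits. That is why tcf_action_goto_chain_init() above
recovers chain_index with a plain mask, and why tc_ctl_tfilter() rejects a
chain index larger than the mask. A sketch of the arithmetic (the helper
name is invented; the macros are from include/uapi/linux/pkt_cls.h):

	#include <linux/pkt_cls.h>

	/* Hypothetical helper: encode "goto chain N" as an action value. */
	static inline int tc_act_goto_chain(u32 chain_index)
	{
		return TC_ACT_GOTO_CHAIN | (chain_index & TC_ACT_EXT_VAL_MASK);
	}

	/* Decoding mirrors the code above:
	 *   TC_ACT_EXT_CMP(v, TC_ACT_GOTO_CHAIN) tests the opcode,
	 *   v & TC_ACT_EXT_VAL_MASK              extracts the chain index.
	 */
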
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index ab6fdbd34db7..3317a2f579da 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -350,6 +350,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
sctph->checksum = sctp_compute_cksum(skb,
skb_network_offset(skb) + ihl);
skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_not_inet = 0;
return 1;
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 22f88b35a546..39da0c5801c9 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -106,13 +106,12 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n,
- struct tcf_proto __rcu **chain, int event)
+ struct tcf_chain *chain, int event)
{
- struct tcf_proto __rcu **it_chain;
struct tcf_proto *tp;
- for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
- it_chain = &tp->next)
+ for (tp = rtnl_dereference(chain->filter_chain);
+ tp; tp = rtnl_dereference(tp->next))
tfilter_notify(net, oskb, n, tp, 0, event, false);
}
@@ -125,11 +124,12 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
if (tp)
first = tp->prio - 1;
- return first;
+ return TC_H_MAJ(first);
}
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
- u32 prio, u32 parent, struct Qdisc *q)
+ u32 prio, u32 parent, struct Qdisc *q,
+ struct tcf_chain *chain)
{
struct tcf_proto *tp;
int err;
@@ -165,6 +165,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
tp->prio = prio;
tp->classid = parent;
tp->q = q;
+ tp->chain = chain;
err = tp->ops->init(tp);
if (err) {
@@ -185,16 +186,226 @@ static void tcf_proto_destroy(struct tcf_proto *tp)
kfree_rcu(tp, rcu);
}
-void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
+ u32 chain_index)
+{
+ struct tcf_chain *chain;
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (!chain)
+ return NULL;
+ list_add_tail(&chain->list, &block->chain_list);
+ chain->block = block;
+ chain->index = chain_index;
+ chain->refcnt = 1;
+ return chain;
+}
+
+static void tcf_chain_flush(struct tcf_chain *chain)
{
struct tcf_proto *tp;
- while ((tp = rtnl_dereference(*fl)) != NULL) {
- RCU_INIT_POINTER(*fl, tp->next);
+ if (chain->p_filter_chain)
+ RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
+ while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
+ RCU_INIT_POINTER(chain->filter_chain, tp->next);
tcf_proto_destroy(tp);
}
}
-EXPORT_SYMBOL(tcf_destroy_chain);
+
+static void tcf_chain_destroy(struct tcf_chain *chain)
+{
+ list_del(&chain->list);
+ tcf_chain_flush(chain);
+ kfree(chain);
+}
+
+struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
+ bool create)
+{
+ struct tcf_chain *chain;
+
+ list_for_each_entry(chain, &block->chain_list, list) {
+ if (chain->index == chain_index) {
+ chain->refcnt++;
+ return chain;
+ }
+ }
+ if (create)
+ return tcf_chain_create(block, chain_index);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(tcf_chain_get);
+
+void tcf_chain_put(struct tcf_chain *chain)
+{
+ /* Destroy an unused chain, with the exception of chain 0, which is the
+ * default one and always has to be present.
+ */
+ if (--chain->refcnt == 0 && !chain->filter_chain && chain->index != 0)
+ tcf_chain_destroy(chain);
+}
+EXPORT_SYMBOL(tcf_chain_put);
+
+static void
+tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
+ struct tcf_proto __rcu **p_filter_chain)
+{
+ chain->p_filter_chain = p_filter_chain;
+}
+
+int tcf_block_get(struct tcf_block **p_block,
+ struct tcf_proto __rcu **p_filter_chain)
+{
+ struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ struct tcf_chain *chain;
+ int err;
+
+ if (!block)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&block->chain_list);
+ /* Create chain 0 by default; it always has to be present. */
+ chain = tcf_chain_create(block, 0);
+ if (!chain) {
+ err = -ENOMEM;
+ goto err_chain_create;
+ }
+ tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
+ *p_block = block;
+ return 0;
+
+err_chain_create:
+ kfree(block);
+ return err;
+}
+EXPORT_SYMBOL(tcf_block_get);
+
+void tcf_block_put(struct tcf_block *block)
+{
+ struct tcf_chain *chain, *tmp;
+
+ if (!block)
+ return;
+
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ tcf_chain_destroy(chain);
+ kfree(block);
+}
+EXPORT_SYMBOL(tcf_block_put);
+
+/* Main classifier routine: scans classifier chain attached
+ * to this qdisc, (optionally) tests for protocol and asks
+ * specific classifiers.
+ */
+int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+ __be16 protocol = tc_skb_protocol(skb);
+#ifdef CONFIG_NET_CLS_ACT
+ const int max_reclassify_loop = 4;
+ const struct tcf_proto *orig_tp = tp;
+ const struct tcf_proto *first_tp;
+ int limit = 0;
+
+reclassify:
+#endif
+ for (; tp; tp = rcu_dereference_bh(tp->next)) {
+ int err;
+
+ if (tp->protocol != protocol &&
+ tp->protocol != htons(ETH_P_ALL))
+ continue;
+
+ err = tp->classify(skb, tp, res);
+#ifdef CONFIG_NET_CLS_ACT
+ if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
+ first_tp = orig_tp;
+ goto reset;
+ } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
+ first_tp = res->goto_tp;
+ goto reset;
+ }
+#endif
+ if (err >= 0)
+ return err;
+ }
+
+ return TC_ACT_UNSPEC; /* signal: continue lookup */
+#ifdef CONFIG_NET_CLS_ACT
+reset:
+ if (unlikely(limit++ >= max_reclassify_loop)) {
+ net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
+ tp->q->ops->id, tp->prio & 0xffff,
+ ntohs(tp->protocol));
+ return TC_ACT_SHOT;
+ }
+
+ tp = first_tp;
+ protocol = tc_skb_protocol(skb);
+ goto reclassify;
+#endif
+}
+EXPORT_SYMBOL(tcf_classify);
+
+struct tcf_chain_info {
+ struct tcf_proto __rcu **pprev;
+ struct tcf_proto __rcu *next;
+};
+
+static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
+{
+ return rtnl_dereference(*chain_info->pprev);
+}
+
+static void tcf_chain_tp_insert(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info,
+ struct tcf_proto *tp)
+{
+ if (chain->p_filter_chain &&
+ *chain_info->pprev == chain->filter_chain)
+ rcu_assign_pointer(*chain->p_filter_chain, tp);
+ RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
+ rcu_assign_pointer(*chain_info->pprev, tp);
+}
+
+static void tcf_chain_tp_remove(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info,
+ struct tcf_proto *tp)
+{
+ struct tcf_proto *next = rtnl_dereference(chain_info->next);
+
+ if (chain->p_filter_chain && tp == chain->filter_chain)
+ RCU_INIT_POINTER(*chain->p_filter_chain, next);
+ RCU_INIT_POINTER(*chain_info->pprev, next);
+}
+
+static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
+ struct tcf_chain_info *chain_info,
+ u32 protocol, u32 prio,
+ bool prio_allocate)
+{
+ struct tcf_proto **pprev;
+ struct tcf_proto *tp;
+
+ /* Check the chain for an existing proto-tcf with this priority */
+ for (pprev = &chain->filter_chain;
+ (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
+ if (tp->prio >= prio) {
+ if (tp->prio == prio) {
+ if (prio_allocate ||
+ (tp->protocol != protocol && protocol))
+ return ERR_PTR(-EINVAL);
+ } else {
+ tp = NULL;
+ }
+ break;
+ }
+ }
+ chain_info->pprev = pprev;
+ chain_info->next = tp ? tp->next : NULL;
+ return tp;
+}
/* Add/change/delete/get a filter node */
@@ -206,13 +417,14 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
struct tcmsg *t;
u32 protocol;
u32 prio;
- u32 nprio;
+ bool prio_allocate;
u32 parent;
+ u32 chain_index;
struct net_device *dev;
struct Qdisc *q;
- struct tcf_proto __rcu **back;
- struct tcf_proto __rcu **chain;
- struct tcf_proto *next;
+ struct tcf_chain_info chain_info;
+ struct tcf_chain *chain = NULL;
+ struct tcf_block *block;
struct tcf_proto *tp;
const struct Qdisc_class_ops *cops;
unsigned long cl;
@@ -234,7 +446,7 @@ replay:
t = nlmsg_data(n);
protocol = TC_H_MIN(t->tcm_info);
prio = TC_H_MAJ(t->tcm_info);
- nprio = prio;
+ prio_allocate = false;
parent = t->tcm_parent;
cl = 0;
@@ -250,6 +462,7 @@ replay:
*/
if (n->nlmsg_flags & NLM_F_CREATE) {
prio = TC_H_MAKE(0x80000000U, 0U);
+ prio_allocate = true;
break;
}
/* fall-through */
@@ -280,7 +493,7 @@ replay:
if (!cops)
return -EINVAL;
- if (cops->tcf_chain == NULL)
+ if (!cops->tcf_block)
return -EOPNOTSUPP;
/* Do we search for filter, attached to class? */
@@ -291,34 +504,36 @@ replay:
}
/* And the last stroke */
- chain = cops->tcf_chain(q, cl);
- if (chain == NULL) {
+ block = cops->tcf_block(q, cl);
+ if (!block) {
err = -EINVAL;
goto errout;
}
+
+ chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ if (chain_index > TC_ACT_EXT_VAL_MASK) {
+ err = -EINVAL;
+ goto errout;
+ }
+ chain = tcf_chain_get(block, chain_index,
+ n->nlmsg_type == RTM_NEWTFILTER);
+ if (!chain) {
+ err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
+ goto errout;
+ }
+
if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
- tcf_destroy_chain(chain);
+ tcf_chain_flush(chain);
err = 0;
goto errout;
}
- /* Check the chain for existence of proto-tcf with this priority */
- for (back = chain;
- (tp = rtnl_dereference(*back)) != NULL;
- back = &tp->next) {
- if (tp->prio >= prio) {
- if (tp->prio == prio) {
- if (!nprio ||
- (tp->protocol != protocol && protocol)) {
- err = -EINVAL;
- goto errout;
- }
- } else {
- tp = NULL;
- }
- break;
- }
+ tp = tcf_chain_tp_find(chain, &chain_info, protocol,
+ prio, prio_allocate);
+ if (IS_ERR(tp)) {
+ err = PTR_ERR(tp);
+ goto errout;
}
if (tp == NULL) {
@@ -335,11 +550,11 @@ replay:
goto errout;
}
- if (!nprio)
- nprio = TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));
+ if (prio_allocate)
+ prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
- protocol, nprio, parent, q);
+ protocol, prio, parent, q, chain);
if (IS_ERR(tp)) {
err = PTR_ERR(tp);
goto errout;
@@ -354,8 +569,7 @@ replay:
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
- next = rtnl_dereference(tp->next);
- RCU_INIT_POINTER(*back, next);
+ tcf_chain_tp_remove(chain, &chain_info, tp);
tfilter_notify(net, skb, n, tp, fh,
RTM_DELTFILTER, false);
tcf_proto_destroy(tp);
@@ -384,11 +598,10 @@ replay:
err = tp->ops->delete(tp, fh, &last);
if (err)
goto errout;
- next = rtnl_dereference(tp->next);
tfilter_notify(net, skb, n, tp, t->tcm_handle,
RTM_DELTFILTER, false);
if (last) {
- RCU_INIT_POINTER(*back, next);
+ tcf_chain_tp_remove(chain, &chain_info, tp);
tcf_proto_destroy(tp);
}
goto errout;
@@ -405,10 +618,8 @@ replay:
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
if (err == 0) {
- if (tp_created) {
- RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
- rcu_assign_pointer(*back, tp);
- }
+ if (tp_created)
+ tcf_chain_tp_insert(chain, &chain_info, tp);
tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
} else {
if (tp_created)
@@ -416,6 +627,8 @@ replay:
}
errout:
+ if (chain)
+ tcf_chain_put(chain);
if (cl)
cops->put(q, cl);
if (err == -EAGAIN)
@@ -444,6 +657,8 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
+ goto nla_put_failure;
tcm->tcm_handle = fh;
if (RTM_DELTFILTER != event) {
tcm->tcm_handle = 0;
@@ -500,22 +715,76 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
RTM_NEWTFILTER);
}
+static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ long index_start, long *p_index)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tcmsg *tcm = nlmsg_data(cb->nlh);
+ struct tcf_dump_args arg;
+ struct tcf_proto *tp;
+
+ for (tp = rtnl_dereference(chain->filter_chain);
+ tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
+ if (*p_index < index_start)
+ continue;
+ if (TC_H_MAJ(tcm->tcm_info) &&
+ TC_H_MAJ(tcm->tcm_info) != tp->prio)
+ continue;
+ if (TC_H_MIN(tcm->tcm_info) &&
+ TC_H_MIN(tcm->tcm_info) != tp->protocol)
+ continue;
+ if (*p_index > index_start)
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ if (cb->args[1] == 0) {
+ if (tcf_fill_node(net, skb, tp, 0,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWTFILTER) <= 0)
+ return false;
+
+ cb->args[1] = 1;
+ }
+ if (!tp->ops->walk)
+ continue;
+ arg.w.fn = tcf_node_dump;
+ arg.skb = skb;
+ arg.cb = cb;
+ arg.w.stop = 0;
+ arg.w.skip = cb->args[1] - 1;
+ arg.w.count = 0;
+ tp->ops->walk(tp, &arg.w);
+ cb->args[1] = arg.w.count + 1;
+ if (arg.w.stop)
+ return false;
+ }
+ return true;
+}
+
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int t;
- int s_t;
+ struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
struct Qdisc *q;
- struct tcf_proto *tp, __rcu **chain;
+ struct tcf_block *block;
+ struct tcf_chain *chain;
struct tcmsg *tcm = nlmsg_data(cb->nlh);
unsigned long cl = 0;
const struct Qdisc_class_ops *cops;
- struct tcf_dump_args arg;
+ long index_start;
+ long index;
+ int err;
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
return skb->len;
+
+ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+ if (err)
+ return err;
+
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev)
return skb->len;
@@ -529,56 +798,29 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
cops = q->ops->cl_ops;
if (!cops)
goto errout;
- if (cops->tcf_chain == NULL)
+ if (!cops->tcf_block)
goto errout;
if (TC_H_MIN(tcm->tcm_parent)) {
cl = cops->get(q, tcm->tcm_parent);
if (cl == 0)
goto errout;
}
- chain = cops->tcf_chain(q, cl);
- if (chain == NULL)
+ block = cops->tcf_block(q, cl);
+ if (!block)
goto errout;
- s_t = cb->args[0];
-
- for (tp = rtnl_dereference(*chain), t = 0;
- tp; tp = rtnl_dereference(tp->next), t++) {
- if (t < s_t)
- continue;
- if (TC_H_MAJ(tcm->tcm_info) &&
- TC_H_MAJ(tcm->tcm_info) != tp->prio)
- continue;
- if (TC_H_MIN(tcm->tcm_info) &&
- TC_H_MIN(tcm->tcm_info) != tp->protocol)
- continue;
- if (t > s_t)
- memset(&cb->args[1], 0,
- sizeof(cb->args)-sizeof(cb->args[0]));
- if (cb->args[1] == 0) {
- if (tcf_fill_node(net, skb, tp, 0,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWTFILTER) <= 0)
- break;
+ index_start = cb->args[0];
+ index = 0;
- cb->args[1] = 1;
- }
- if (tp->ops->walk == NULL)
+ list_for_each_entry(chain, &block->chain_list, list) {
+ if (tca[TCA_CHAIN] &&
+ nla_get_u32(tca[TCA_CHAIN]) != chain->index)
continue;
- arg.w.fn = tcf_node_dump;
- arg.skb = skb;
- arg.cb = cb;
- arg.w.stop = 0;
- arg.w.skip = cb->args[1] - 1;
- arg.w.count = 0;
- tp->ops->walk(tp, &arg.w);
- cb->args[1] = arg.w.count + 1;
- if (arg.w.stop)
+ if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
break;
}
- cb->args[0] = t;
+ cb->args[0] = index;
errout:
if (cl)
@@ -608,8 +850,9 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
struct tc_action *act;
if (exts->police && tb[exts->police]) {
- act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
- "police", ovr, TCA_ACT_BIND);
+ act = tcf_action_init_1(net, tp, tb[exts->police],
+ rate_tlv, "police", ovr,
+ TCA_ACT_BIND);
if (IS_ERR(act))
return PTR_ERR(act);
@@ -620,8 +863,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
LIST_HEAD(actions);
int err, i = 0;
- err = tcf_action_init(net, tb[exts->action], rate_tlv,
- NULL, ovr, TCA_ACT_BIND,
+ err = tcf_action_init(net, tp, tb[exts->action],
+ rate_tlv, NULL, ovr, TCA_ACT_BIND,
&actions);
if (err)
return err;
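
Every classful qdisc converted below follows the same two-step recipe: take
a tcf_block next to the legacy filter_list pointer at init time, release it
on destroy, and call tcf_classify() instead of tc_classify() on the fast
path. Roughly the shape each sch_* hunk instantiates (foo_sched is a
stand-in for the qdisc's private data):

	struct foo_sched {
		struct tcf_proto __rcu	*filter_list;	/* chain 0 mirror */
		struct tcf_block	*block;		/* owns all chains */
	};

	static int foo_init(struct Qdisc *sch, struct nlattr *opt)
	{
		struct foo_sched *q = qdisc_priv(sch);

		/* tcf_block_get() keeps chain 0 mirrored into
		 * q->filter_list so the datapath can still dereference
		 * it under RCU without knowing about blocks.
		 */
		return tcf_block_get(&q->block, &q->filter_list);
	}

	static void foo_destroy(struct Qdisc *sch)
	{
		struct foo_sched *q = qdisc_priv(sch);

		tcf_block_put(q->block);	/* flushes and frees chains */
	}
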
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index ca526c0881bd..fb74a47830f4 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -49,6 +49,7 @@ struct fl_flow_key {
};
struct flow_dissector_key_ports enc_tp;
struct flow_dissector_key_mpls mpls;
+ struct flow_dissector_key_tcp tcp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -424,6 +425,8 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
};
static void fl_set_key_val(struct nlattr **tb,
@@ -596,6 +599,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
&mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
sizeof(key->tp.dst));
+ fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
+ &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
+ sizeof(key->tcp.flags));
} else if (key->basic.ip_proto == IPPROTO_UDP) {
fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
&mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
@@ -767,6 +773,8 @@ static void fl_init_dissector(struct cls_fl_head *head,
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_PORTS, tp);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_TCP, tcp);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ICMP, icmp);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ARP, arp);
@@ -1215,7 +1223,10 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
sizeof(key->tp.src)) ||
fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
&mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
- sizeof(key->tp.dst))))
+ sizeof(key->tp.dst)) ||
+ fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
+ &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
+ sizeof(key->tcp.flags))))
goto nla_put_failure;
else if (key->basic.ip_proto == IPPROTO_UDP &&
(fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index e88342fde1bc..5d95401bbc02 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -163,7 +163,7 @@ int register_qdisc(struct Qdisc_ops *qops)
if (!(cops->get && cops->put && cops->walk && cops->leaf))
goto out_einval;
- if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
+ if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
goto out_einval;
}
@@ -1878,54 +1878,6 @@ done:
return skb->len;
}
-/* Main classifier routine: scans classifier chain attached
- * to this qdisc, (optionally) tests for protocol and asks
- * specific classifiers.
- */
-int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode)
-{
- __be16 protocol = tc_skb_protocol(skb);
-#ifdef CONFIG_NET_CLS_ACT
- const int max_reclassify_loop = 4;
- const struct tcf_proto *old_tp = tp;
- int limit = 0;
-
-reclassify:
-#endif
- for (; tp; tp = rcu_dereference_bh(tp->next)) {
- int err;
-
- if (tp->protocol != protocol &&
- tp->protocol != htons(ETH_P_ALL))
- continue;
-
- err = tp->classify(skb, tp, res);
-#ifdef CONFIG_NET_CLS_ACT
- if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
- goto reset;
-#endif
- if (err >= 0)
- return err;
- }
-
- return TC_ACT_UNSPEC; /* signal: continue lookup */
-#ifdef CONFIG_NET_CLS_ACT
-reset:
- if (unlikely(limit++ >= max_reclassify_loop)) {
- net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
- tp->q->ops->id, tp->prio & 0xffff,
- ntohs(tp->protocol));
- return TC_ACT_SHOT;
- }
-
- tp = old_tp;
- protocol = tc_skb_protocol(skb);
- goto reclassify;
-#endif
-}
-EXPORT_SYMBOL(tc_classify);
-
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 40cbceed4de8..f435546c3864 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -43,6 +43,7 @@
struct atm_flow_data {
struct Qdisc *q; /* FIFO, TBF, etc. */
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
void (*old_pop)(struct atm_vcc *vcc,
struct sk_buff *skb); /* chaining */
@@ -143,7 +144,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
list_del_init(&flow->list);
pr_debug("atm_tc_put: qdisc %p\n", flow->q);
qdisc_destroy(flow->q);
- tcf_destroy_chain(&flow->filter_list);
+ tcf_block_put(flow->block);
if (flow->sock) {
pr_debug("atm_tc_put: f_count %ld\n",
file_count(flow->sock->file));
@@ -274,7 +275,13 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
error = -ENOBUFS;
goto err_out;
}
- RCU_INIT_POINTER(flow->filter_list, NULL);
+
+ error = tcf_block_get(&flow->block, &flow->filter_list);
+ if (error) {
+ kfree(flow);
+ goto err_out;
+ }
+
flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
if (!flow->q)
flow->q = &noop_qdisc;
@@ -346,14 +353,13 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
struct atm_flow_data *flow = (struct atm_flow_data *)cl;
pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
- return flow ? &flow->filter_list : &p->link.filter_list;
+ return flow ? flow->block : p->link.block;
}
/* --------------------------- Qdisc operations ---------------------------- */
@@ -377,7 +383,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
list_for_each_entry(flow, &p->flows, list) {
fl = rcu_dereference_bh(flow->filter_list);
if (fl) {
- result = tc_classify(skb, fl, &res, true);
+ result = tcf_classify(skb, fl, &res, true);
if (result < 0)
continue;
flow = (struct atm_flow_data *)res.class;
@@ -524,6 +530,7 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
+ int err;
pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
INIT_LIST_HEAD(&p->flows);
@@ -534,7 +541,11 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
if (!p->link.q)
p->link.q = &noop_qdisc;
pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
- RCU_INIT_POINTER(p->link.filter_list, NULL);
+
+ err = tcf_block_get(&p->link.block, &p->link.filter_list);
+ if (err)
+ return err;
+
p->link.vcc = NULL;
p->link.sock = NULL;
p->link.classid = sch->handle;
@@ -561,7 +572,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
list_for_each_entry(flow, &p->flows, list)
- tcf_destroy_chain(&flow->filter_list);
+ tcf_block_put(flow->block);
list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1)
@@ -646,7 +657,7 @@ static const struct Qdisc_class_ops atm_class_ops = {
.change = atm_tc_change,
.delete = atm_tc_delete,
.walk = atm_tc_walk,
- .tcf_chain = atm_tc_find_tcf,
+ .tcf_block = atm_tc_tcf_block,
.bind_tcf = atm_tc_bind_filter,
.unbind_tcf = atm_tc_put,
.dump = atm_tc_dump_class,
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 7415859fd4c3..8dd6d0aca678 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -127,6 +127,7 @@ struct cbq_class {
struct tc_cbq_xstats xstats;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
int refcnt;
int filters;
@@ -233,7 +234,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
/*
* Step 2+n. Apply classifier.
*/
- result = tc_classify(skb, fl, &res, true);
+ result = tcf_classify(skb, fl, &res, true);
if (!fl || result < 0)
goto fallback;
@@ -1405,7 +1406,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
WARN_ON(cl->filters);
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
qdisc_destroy(cl->q);
qdisc_put_rtab(cl->R_tab);
gen_kill_estimator(&cl->rate_est);
@@ -1430,7 +1431,7 @@ static void cbq_destroy(struct Qdisc *sch)
*/
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
}
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
@@ -1585,12 +1586,19 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (cl == NULL)
goto failure;
+ err = tcf_block_get(&cl->block, &cl->filter_list);
+ if (err) {
+ kfree(cl);
+ return err;
+ }
+
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
NULL,
qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
+ tcf_block_put(cl->block);
kfree(cl);
goto failure;
}
@@ -1688,8 +1696,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
return 0;
}
-static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
- unsigned long arg)
+static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
@@ -1697,7 +1704,7 @@ static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
if (cl == NULL)
cl = &q->link;
- return &cl->filter_list;
+ return cl->block;
}
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
@@ -1756,7 +1763,7 @@ static const struct Qdisc_class_ops cbq_class_ops = {
.change = cbq_change_class,
.delete = cbq_delete,
.walk = cbq_walk,
- .tcf_chain = cbq_find_tcf,
+ .tcf_block = cbq_tcf_block,
.bind_tcf = cbq_bind_filter,
.unbind_tcf = cbq_unbind_filter,
.dump = cbq_dump_class,
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 58a8c32eab23..5db2a2843c66 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -36,6 +36,7 @@ struct drr_class {
struct drr_sched {
struct list_head active;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct Qdisc_class_hash clhash;
};
@@ -190,15 +191,14 @@ static void drr_put_class(struct Qdisc *sch, unsigned long arg)
drr_destroy_class(sch, cl);
}
-static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct drr_sched *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
@@ -333,7 +333,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
fl = rcu_dereference_bh(q->filter_list);
- result = tc_classify(skb, fl, &res, false);
+ result = tcf_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -431,6 +431,9 @@ static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
struct drr_sched *q = qdisc_priv(sch);
int err;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
@@ -462,7 +465,7 @@ static void drr_destroy_qdisc(struct Qdisc *sch)
struct hlist_node *next;
unsigned int i;
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -477,7 +480,7 @@ static const struct Qdisc_class_ops drr_class_ops = {
.delete = drr_delete_class,
.get = drr_get_class,
.put = drr_put_class,
- .tcf_chain = drr_tcf_chain,
+ .tcf_block = drr_tcf_block,
.bind_tcf = drr_bind_tcf,
.unbind_tcf = drr_unbind_tcf,
.graft = drr_graft_class,
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1c0f877f673a..7ccdd825d34e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -44,6 +44,7 @@ struct mask_value {
struct dsmark_qdisc_data {
struct Qdisc *q;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct mask_value *mv;
u16 indices;
u8 set_tc_index;
@@ -183,11 +184,11 @@ ignore:
}
}
-static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
- return &p->filter_list;
+
+ return p->block;
}
/* --------------------------- Qdisc operations ---------------------------- */
@@ -234,7 +235,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
else {
struct tcf_result res;
struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
- int result = tc_classify(skb, fl, &res, false);
+ int result = tcf_classify(skb, fl, &res, false);
pr_debug("result %d class 0x%04x\n", result, res.classid);
@@ -342,6 +343,10 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
if (!opt)
goto errout;
+ err = tcf_block_get(&p->block, &p->filter_list);
+ if (err)
+ return err;
+
err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy, NULL);
if (err < 0)
goto errout;
@@ -400,7 +405,7 @@ static void dsmark_destroy(struct Qdisc *sch)
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
- tcf_destroy_chain(&p->filter_list);
+ tcf_block_put(p->block);
qdisc_destroy(p->q);
if (p->mv != p->embedded)
kfree(p->mv);
@@ -468,7 +473,7 @@ static const struct Qdisc_class_ops dsmark_class_ops = {
.change = dsmark_change,
.delete = dsmark_delete,
.walk = dsmark_walk,
- .tcf_chain = dsmark_find_tcf,
+ .tcf_block = dsmark_tcf_block,
.bind_tcf = dsmark_bind_filter,
.unbind_tcf = dsmark_put,
.dump = dsmark_dump_class,
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index b488721a0059..147fde73a0f5 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -390,9 +390,17 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->stat_tcp_retrans++;
qdisc_qstats_backlog_inc(sch, skb);
if (fq_flow_is_detached(f)) {
+ struct sock *sk = skb->sk;
+
fq_flow_add_tail(&q->new_flows, f);
if (time_after(jiffies, f->age + q->flow_refill_delay))
f->credit = max_t(u32, f->credit, q->quantum);
+ if (sk && q->rate_enable) {
+ if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
+ SK_PACING_FQ))
+ smp_store_release(&sk->sk_pacing_status,
+ SK_PACING_FQ);
+ }
q->inactive_flows--;
}
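
[Editor's note] The fq hunk above is unrelated to the tcf_block work: when a detached flow re-enters the scheduler, fq now stamps the owning socket with SK_PACING_FQ so the rest of the stack can tell an fq instance is already pacing it. A condensed sketch of the ordering idea, with the field names taken from the hunk:

	/* A reader elsewhere in the stack pairs with the release store:
	 * once it observes SK_PACING_FQ it can skip its own pacing.
	 * The unlikely() acquire-load guard keeps the hot enqueue path
	 * from dirtying sk_pacing_status on every packet.
	 */
	if (sk && q->rate_enable &&
	    unlikely(smp_load_acquire(&sk->sk_pacing_status) != SK_PACING_FQ))
		smp_store_release(&sk->sk_pacing_status, SK_PACING_FQ);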
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 9201abce928c..f201e73947fb 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -55,6 +55,7 @@ struct fq_codel_flow {
struct fq_codel_sched_data {
struct tcf_proto __rcu *filter_list; /* optional external classifier */
+ struct tcf_block *block;
struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
u32 *backlogs; /* backlog table [flows_cnt] */
u32 flows_cnt; /* number of flows */
@@ -96,7 +97,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
return fq_codel_hash(q, skb) + 1;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, filter, &res, false);
+ result = tcf_classify(skb, filter, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -450,7 +451,7 @@ static void fq_codel_destroy(struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
kvfree(q->backlogs);
kvfree(q->flows);
}
@@ -459,6 +460,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
int i;
+ int err;
sch->limit = 10*1024;
q->flows_cnt = 1024;
@@ -478,6 +480,10 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
return err;
}
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
if (!q->flows) {
q->flows = kvzalloc(q->flows_cnt *
sizeof(struct fq_codel_flow), GFP_KERNEL);
@@ -589,14 +595,13 @@ static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}
-static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
@@ -679,7 +684,7 @@ static const struct Qdisc_class_ops fq_codel_class_ops = {
.leaf = fq_codel_leaf,
.get = fq_codel_get,
.put = fq_codel_put,
- .tcf_chain = fq_codel_find_tcf,
+ .tcf_block = fq_codel_tcf_block,
.bind_tcf = fq_codel_bind,
.unbind_tcf = fq_codel_put,
.dump = fq_codel_dump_class,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5cb82f6c1b06..a324f84b1ccd 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -116,6 +116,7 @@ struct hfsc_class {
struct gnet_stats_queue qstats;
struct net_rate_estimator __rcu *rate_est;
struct tcf_proto __rcu *filter_list; /* filter list */
+ struct tcf_block *block;
unsigned int filter_cnt; /* filter count */
unsigned int level; /* class level in hierarchy */
@@ -1040,12 +1041,19 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL)
return -ENOBUFS;
+ err = tcf_block_get(&cl->block, &cl->filter_list);
+ if (err) {
+ kfree(cl);
+ return err;
+ }
+
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
NULL,
qdisc_root_sleeping_running(sch),
tca[TCA_RATE]);
if (err) {
+ tcf_block_put(cl->block);
kfree(cl);
return err;
}
@@ -1091,7 +1099,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
struct hfsc_sched *q = qdisc_priv(sch);
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
qdisc_destroy(cl->qdisc);
gen_kill_estimator(&cl->rate_est);
if (cl != &q->root)
@@ -1142,7 +1150,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
head = &q->root;
tcf = rcu_dereference_bh(q->root.filter_list);
- while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
+ while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
@@ -1261,8 +1269,7 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
cl->filter_cnt--;
}
-static struct tcf_proto __rcu **
-hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1270,7 +1277,7 @@ hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
if (cl == NULL)
cl = &q->root;
- return &cl->filter_list;
+ return cl->block;
}
static int
@@ -1515,7 +1522,7 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -1662,7 +1669,7 @@ static const struct Qdisc_class_ops hfsc_class_ops = {
.put = hfsc_put_class,
.bind_tcf = hfsc_bind_tcf,
.unbind_tcf = hfsc_unbind_tcf,
- .tcf_chain = hfsc_tcf_chain,
+ .tcf_block = hfsc_tcf_block,
.dump = hfsc_dump_class,
.dump_stats = hfsc_dump_class_stats,
.walk = hfsc_walk
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 570ef3b0c09b..195bbca9eb0b 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -105,6 +105,7 @@ struct htb_class {
int quantum; /* but stored for parent-to-leaf return */
struct tcf_proto __rcu *filter_list; /* class attached filters */
+ struct tcf_block *block;
int filter_cnt;
int refcnt; /* usage count of this class */
@@ -156,6 +157,7 @@ struct htb_sched {
/* filters for qdisc itself */
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
#define HTB_WARN_TOOMANYEVENTS 0x1
unsigned int warned; /* only one warning */
@@ -231,7 +233,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
+ while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
@@ -1017,6 +1019,10 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
if (!opt)
return -EINVAL;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
if (err < 0)
return err;
@@ -1230,7 +1236,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
qdisc_destroy(cl->un.leaf.q);
}
gen_kill_estimator(&cl->rate_est);
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
kfree(cl);
}
@@ -1248,11 +1254,11 @@ static void htb_destroy(struct Qdisc *sch)
* because filter need its target class alive to be able to call
* unbind_filter on it (without Oops).
*/
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
- tcf_destroy_chain(&cl->filter_list);
+ tcf_block_put(cl->block);
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -1396,6 +1402,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!cl)
goto failure;
+ err = tcf_block_get(&cl->block, &cl->filter_list);
+ if (err) {
+ kfree(cl);
+ goto failure;
+ }
if (htb_rate_est || tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL,
&cl->rate_est,
@@ -1403,6 +1414,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
qdisc_root_sleeping_running(sch),
tca[TCA_RATE] ? : &est.nla);
if (err) {
+ tcf_block_put(cl->block);
kfree(cl);
goto failure;
}
@@ -1521,14 +1533,12 @@ failure:
return err;
}
-static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
- unsigned long arg)
+static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
- struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;
- return fl;
+ return cl ? cl->block : q->block;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
@@ -1591,7 +1601,7 @@ static const struct Qdisc_class_ops htb_class_ops = {
.change = htb_change_class,
.delete = htb_delete,
.walk = htb_walk,
- .tcf_chain = htb_find_tcf,
+ .tcf_block = htb_tcf_block,
.bind_tcf = htb_bind_filter,
.unbind_tcf = htb_unbind_filter,
.dump = htb_dump_class,
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 3bab5f66c392..d8a9bebcab90 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -18,6 +18,10 @@
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
+struct ingress_sched_data {
+ struct tcf_block *block;
+};
+
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
@@ -47,16 +51,23 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}
-static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl)
{
- struct net_device *dev = qdisc_dev(sch);
+ struct ingress_sched_data *q = qdisc_priv(sch);
- return &dev->ingress_cl_list;
+ return q->block;
}
static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
{
+ struct ingress_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ int err;
+
+ err = tcf_block_get(&q->block, &dev->ingress_cl_list);
+ if (err)
+ return err;
+
net_inc_ingress_queue();
sch->flags |= TCQ_F_CPUSTATS;
@@ -65,9 +76,9 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
static void ingress_destroy(struct Qdisc *sch)
{
- struct net_device *dev = qdisc_dev(sch);
+ struct ingress_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&dev->ingress_cl_list);
+ tcf_block_put(q->block);
net_dec_ingress_queue();
}
@@ -91,7 +102,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
.get = ingress_get,
.put = ingress_put,
.walk = ingress_walk,
- .tcf_chain = ingress_find_tcf,
+ .tcf_block = ingress_tcf_block,
.tcf_cl_offload = ingress_cl_offload,
.bind_tcf = ingress_bind_filter,
.unbind_tcf = ingress_put,
@@ -100,12 +111,18 @@ static const struct Qdisc_class_ops ingress_class_ops = {
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
+ .priv_size = sizeof(struct ingress_sched_data),
.init = ingress_init,
.destroy = ingress_destroy,
.dump = ingress_dump,
.owner = THIS_MODULE,
};
+struct clsact_sched_data {
+ struct tcf_block *ingress_block;
+ struct tcf_block *egress_block;
+};
+
static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
{
switch (TC_H_MIN(classid)) {
@@ -128,16 +145,15 @@ static unsigned long clsact_bind_filter(struct Qdisc *sch,
return clsact_get(sch, classid);
}
-static struct tcf_proto __rcu **clsact_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
{
- struct net_device *dev = qdisc_dev(sch);
+ struct clsact_sched_data *q = qdisc_priv(sch);
switch (cl) {
case TC_H_MIN(TC_H_MIN_INGRESS):
- return &dev->ingress_cl_list;
+ return q->ingress_block;
case TC_H_MIN(TC_H_MIN_EGRESS):
- return &dev->egress_cl_list;
+ return q->egress_block;
default:
return NULL;
}
@@ -145,6 +161,18 @@ static struct tcf_proto __rcu **clsact_find_tcf(struct Qdisc *sch,
static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
{
+ struct clsact_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ int err;
+
+ err = tcf_block_get(&q->ingress_block, &dev->ingress_cl_list);
+ if (err)
+ return err;
+
+ err = tcf_block_get(&q->egress_block, &dev->egress_cl_list);
+ if (err)
+ return err;
+
net_inc_ingress_queue();
net_inc_egress_queue();
@@ -155,10 +183,10 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
static void clsact_destroy(struct Qdisc *sch)
{
- struct net_device *dev = qdisc_dev(sch);
+ struct clsact_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&dev->ingress_cl_list);
- tcf_destroy_chain(&dev->egress_cl_list);
+ tcf_block_put(q->egress_block);
+ tcf_block_put(q->ingress_block);
net_dec_ingress_queue();
net_dec_egress_queue();
@@ -169,7 +197,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
.get = clsact_get,
.put = ingress_put,
.walk = ingress_walk,
- .tcf_chain = clsact_find_tcf,
+ .tcf_block = clsact_tcf_block,
.tcf_cl_offload = clsact_cl_offload,
.bind_tcf = clsact_bind_filter,
.unbind_tcf = ingress_put,
@@ -178,6 +206,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.cl_ops = &clsact_class_ops,
.id = "clsact",
+ .priv_size = sizeof(struct clsact_sched_data),
.init = clsact_init,
.destroy = clsact_destroy,
.dump = ingress_dump,
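
[Editor's note] ingress and clsact previously carried no private state at all; their filter chains lived directly on the net_device (ingress_cl_list / egress_cl_list). The hunks above give each qdisc a priv area (note the new .priv_size) holding the block(s), while tcf_block_get() still binds the blocks to the per-device chain pointers so the datapath hooks keep working. An illustrative sketch, under the assumption that the core resolves a block the way the filter-configuration path does, first mapping the classid to a class handle via .get and then asking the new .tcf_block hook:

	/* Illustrative only; hook names per the hunks above. */
	static struct tcf_block *qdisc_block_lookup(struct Qdisc *sch,
						    u32 classid)
	{
		const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
		unsigned long cl = 0;

		if (TC_H_MIN(classid))
			cl = cops->get(sch, classid);	/* e.g. clsact_get() */
		return cops->tcf_block(sch, cl);	/* may be NULL */
	}

With clsact, "tc filter add dev eth0 ingress ..." resolves TC_H_MIN_INGRESS to q->ingress_block and the egress variant to q->egress_block; any other minor classid returns NULL and the filter request is rejected.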
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 43a3a10b3c81..604767482ad0 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -32,6 +32,7 @@ struct multiq_sched_data {
u16 max_bands;
u16 curband;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct Qdisc **queues;
};
@@ -46,7 +47,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
int err;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- err = tc_classify(skb, fl, &res, false);
+ err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
@@ -170,7 +171,7 @@ multiq_destroy(struct Qdisc *sch)
int band;
struct multiq_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (band = 0; band < q->bands; band++)
qdisc_destroy(q->queues[band]);
@@ -243,6 +244,10 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
if (opt == NULL)
return -EINVAL;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
q->max_bands = qdisc_dev(sch)->num_tx_queues;
q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
@@ -367,14 +372,13 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct multiq_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static const struct Qdisc_class_ops multiq_class_ops = {
@@ -383,7 +387,7 @@ static const struct Qdisc_class_ops multiq_class_ops = {
.get = multiq_get,
.put = multiq_put,
.walk = multiq_walk,
- .tcf_chain = multiq_find_tcf,
+ .tcf_block = multiq_tcf_block,
.bind_tcf = multiq_bind,
.unbind_tcf = multiq_put,
.dump = multiq_dump_class,
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 92c2e6d448d7..a2404688dd01 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -25,6 +25,7 @@
struct prio_sched_data {
int bands;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
u8 prio2band[TC_PRIO_MAX+1];
struct Qdisc *queues[TCQ_PRIO_BANDS];
};
@@ -42,7 +43,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
if (TC_H_MAJ(skb->priority) != sch->handle) {
fl = rcu_dereference_bh(q->filter_list);
- err = tc_classify(skb, fl, &res, false);
+ err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
@@ -145,7 +146,7 @@ prio_destroy(struct Qdisc *sch)
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]);
}
@@ -204,9 +205,16 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
+ struct prio_sched_data *q = qdisc_priv(sch);
+ int err;
+
if (!opt)
return -EINVAL;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
return prio_tune(sch, opt);
}
@@ -317,14 +325,13 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct prio_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static const struct Qdisc_class_ops prio_class_ops = {
@@ -333,7 +340,7 @@ static const struct Qdisc_class_ops prio_class_ops = {
.get = prio_get,
.put = prio_put,
.walk = prio_walk,
- .tcf_chain = prio_find_tcf,
+ .tcf_block = prio_tcf_block,
.bind_tcf = prio_bind,
.unbind_tcf = prio_put,
.dump = prio_dump_class,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 041eba3006cc..076ad032befb 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -182,6 +182,7 @@ struct qfq_group {
struct qfq_sched {
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
struct Qdisc_class_hash clhash;
u64 oldV, V; /* Precise virtual times. */
@@ -582,15 +583,14 @@ static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
qfq_destroy_class(sch, cl);
}
-static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct qfq_sched *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
@@ -720,7 +720,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
fl = rcu_dereference_bh(q->filter_list);
- result = tc_classify(skb, fl, &res, false);
+ result = tcf_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -1438,6 +1438,10 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
int i, j, err;
u32 max_cl_shift, maxbudg_shift, max_classes;
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
+
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
@@ -1492,7 +1496,7 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
struct hlist_node *next;
unsigned int i;
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -1508,7 +1512,7 @@ static const struct Qdisc_class_ops qfq_class_ops = {
.delete = qfq_delete_class,
.get = qfq_get_class,
.put = qfq_put_class,
- .tcf_chain = qfq_tcf_chain,
+ .tcf_block = qfq_tcf_block,
.bind_tcf = qfq_bind_tcf,
.unbind_tcf = qfq_unbind_tcf,
.graft = qfq_graft_class,
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0f777273ba29..9756b1ccd345 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -56,6 +56,7 @@ struct sfb_bins {
struct sfb_sched_data {
struct Qdisc *qdisc;
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
unsigned long rehash_interval;
unsigned long warmup_time; /* double buffering warmup time in jiffies */
u32 max;
@@ -259,7 +260,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
struct tcf_result res;
int result;
- result = tc_classify(skb, fl, &res, false);
+ result = tcf_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -465,7 +466,7 @@ static void sfb_destroy(struct Qdisc *sch)
{
struct sfb_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
qdisc_destroy(q->qdisc);
}
@@ -549,6 +550,11 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
struct sfb_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
q->qdisc = &noop_qdisc;
return sfb_change(sch, opt);
@@ -657,14 +663,13 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct sfb_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
@@ -682,7 +687,7 @@ static const struct Qdisc_class_ops sfb_class_ops = {
.change = sfb_change_class,
.delete = sfb_delete,
.walk = sfb_walk,
- .tcf_chain = sfb_find_tcf,
+ .tcf_block = sfb_tcf_block,
.bind_tcf = sfb_bind,
.unbind_tcf = sfb_put,
.dump = sfb_dump_class,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 332d94be6e1c..66dfd15b7946 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -126,6 +126,7 @@ struct sfq_sched_data {
u8 flags;
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct tcf_proto __rcu *filter_list;
+ struct tcf_block *block;
sfq_index *ht; /* Hash table ('divisor' slots) */
struct sfq_slot *slots; /* Flows table ('maxflows' entries) */
@@ -180,7 +181,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
return sfq_hash(q, skb) + 1;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, fl, &res, false);
+ result = tcf_classify(skb, fl, &res, false);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -697,7 +698,7 @@ static void sfq_destroy(struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- tcf_destroy_chain(&q->filter_list);
+ tcf_block_put(q->block);
q->perturb_period = 0;
del_timer_sync(&q->perturb_timer);
sfq_free(q->ht);
@@ -709,6 +710,11 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
struct sfq_sched_data *q = qdisc_priv(sch);
int i;
+ int err;
+
+ err = tcf_block_get(&q->block, &q->filter_list);
+ if (err)
+ return err;
setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
(unsigned long)sch);
@@ -815,14 +821,13 @@ static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}
-static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
struct sfq_sched_data *q = qdisc_priv(sch);
if (cl)
return NULL;
- return &q->filter_list;
+ return q->block;
}
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
@@ -878,7 +883,7 @@ static const struct Qdisc_class_ops sfq_class_ops = {
.leaf = sfq_leaf,
.get = sfq_get,
.put = sfq_put,
- .tcf_chain = sfq_find_tcf,
+ .tcf_block = sfq_tcf_block,
.bind_tcf = sfq_bind,
.unbind_tcf = sfq_put,
.dump = sfq_dump_class,
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 4f5a2b580aa5..275925b93b29 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -35,6 +35,7 @@
static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_not_inet = 0;
return sctp_compute_cksum(skb, skb_transport_offset(skb));
}
@@ -98,6 +99,11 @@ static const struct net_offload sctp6_offload = {
},
};
+static const struct skb_checksum_ops crc32c_csum_ops = {
+ .update = sctp_csum_update,
+ .combine = sctp_csum_combine,
+};
+
int __init sctp_offload_init(void)
{
int ret;
@@ -110,6 +116,7 @@ int __init sctp_offload_init(void)
if (ret)
goto ipv4;
+ crc32c_csum_stub = &crc32c_csum_ops;
return ret;
ipv4:
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1409a875ad8e..e2edf2ebbade 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -538,6 +538,7 @@ merge:
} else {
chksum:
head->ip_summed = CHECKSUM_PARTIAL;
+ head->csum_not_inet = 1;
head->csum_start = skb_transport_header(head) - head->head;
head->csum_offset = offsetof(struct sctphdr, checksum);
}
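
[Editor's note] The two sctp hunks are the protocol side of the CRC32c offload rework elsewhere in this series: skb->csum_not_inet marks a CHECKSUM_PARTIAL skb whose pending checksum is CRC32c rather than the ones'-complement Internet checksum, and crc32c_csum_stub publishes resolvable update/combine ops to the core. A hedged sketch of the consumer side, assuming the skb_crc32c_csum_help() helper introduced by the same series:

	/* Illustrative only: on transmit, a device that cannot offload
	 * SCTP CRC (no NETIF_F_SCTP_CRC) must have the checksum folded
	 * in software, via the CRC32c path when csum_not_inet is set.
	 */
	static int csum_help(struct sk_buff *skb, struct net_device *dev)
	{
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return 0;
		if (skb->csum_not_inet && !(dev->features & NETIF_F_SCTP_CRC))
			return skb_crc32c_csum_help(skb);	/* assumption */
		return skb_checksum_help(skb);
	}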
diff --git a/net/socket.c b/net/socket.c
index c2564eb25c6b..8f9dab330d57 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -461,7 +461,7 @@ EXPORT_SYMBOL(sock_from_file);
* @err: pointer to an error code return
*
* The file handle passed in is locked and the socket it is bound
- * too is returned. If an error occurs the err pointer is overwritten
+ * to is returned. If an error occurs the err pointer is overwritten
* with a negative errno code and NULL is returned. The function checks
* for both invalid handles and passing a handle which is not a socket.
*
@@ -662,6 +662,40 @@ static bool skb_is_err_queue(const struct sk_buff *skb)
return skb->pkt_type == PACKET_OUTGOING;
}
+/* On transmit, software and hardware timestamps are returned independently.
+ * As the two skb clones share the hardware timestamp, which may be updated
+ * before the software timestamp is received, a hardware TX timestamp may be
+ * returned only if there is no software TX timestamp. Ignore false software
+ * timestamps, which may be made in the __sock_recv_timestamp() call when the
+ * option SO_TIMESTAMP(NS) is enabled on the socket, even when the skb has a
+ * hardware timestamp.
+ */
+static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
+{
+ return skb->tstamp && !false_tstamp && skb_is_err_queue(skb);
+}
+
+static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb)
+{
+ struct scm_ts_pktinfo ts_pktinfo;
+ struct net_device *orig_dev;
+
+ if (!skb_mac_header_was_set(skb))
+ return;
+
+ memset(&ts_pktinfo, 0, sizeof(ts_pktinfo));
+
+ rcu_read_lock();
+ orig_dev = dev_get_by_napi_id(skb_napi_id(skb));
+ if (orig_dev)
+ ts_pktinfo.if_index = orig_dev->ifindex;
+ rcu_read_unlock();
+
+ ts_pktinfo.pkt_length = skb->len - skb_mac_offset(skb);
+ put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_PKTINFO,
+ sizeof(ts_pktinfo), &ts_pktinfo);
+}
+
/*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/
@@ -670,14 +704,16 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
{
int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
struct scm_timestamping tss;
- int empty = 1;
+ int empty = 1, false_tstamp = 0;
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
- if (need_software_tstamp && skb->tstamp == 0)
+ if (need_software_tstamp && skb->tstamp == 0) {
__net_timestamp(skb);
+ false_tstamp = 1;
+ }
if (need_software_tstamp) {
if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
@@ -699,8 +735,13 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
empty = 0;
if (shhwtstamps &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
- ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2))
+ !skb_is_swtx_tstamp(skb, false_tstamp) &&
+ ktime_to_timespec_cond(shhwtstamps->hwtstamp, tss.ts + 2)) {
empty = 0;
+ if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+ !skb_is_err_queue(skb))
+ put_ts_pktinfo(msg, skb);
+ }
if (!empty) {
put_cmsg(msg, SOL_SOCKET,
SCM_TIMESTAMPING, sizeof(tss), &tss);
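
[Editor's note] SCM_TIMESTAMPING_PKTINFO is a new ancillary message carrying the receiving interface index and the layer-2 packet length alongside a hardware receive timestamp. A hedged userspace sketch of consuming it (struct scm_ts_pktinfo and the SOF_/SCM_ constants come from the uapi headers added by this series; error handling omitted):

	#include <linux/net_tstamp.h>
	#include <string.h>
	#include <sys/socket.h>

	static void enable_pktinfo(int fd)
	{
		int val = SOF_TIMESTAMPING_RX_HARDWARE |
			  SOF_TIMESTAMPING_RAW_HARDWARE |
			  SOF_TIMESTAMPING_OPT_PKTINFO;

		setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
	}

	static void handle_pktinfo(struct msghdr *msg)
	{
		struct scm_ts_pktinfo info;
		struct cmsghdr *cm;

		for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
			if (cm->cmsg_level != SOL_SOCKET ||
			    cm->cmsg_type != SCM_TIMESTAMPING_PKTINFO)
				continue;
			memcpy(&info, CMSG_DATA(cm), sizeof(info));
			/* info.if_index: device that received the packet
			 * (resolved from the napi_id in put_ts_pktinfo());
			 * info.pkt_length: skb->len measured from the MAC
			 * header.
			 */
		}
	}

Note from the kernel side above that the cmsg is emitted only next to a hardware timestamp on the regular receive path, never for error-queue (transmit) timestamps.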
diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh
index d85968cb1bf2..112deba8c4e9 100755
--- a/tools/hv/bondvf.sh
+++ b/tools/hv/bondvf.sh
@@ -102,15 +102,30 @@ function create_bond_cfg_redhat {
}
function del_eth_cfg_ubuntu {
- local fn=$cfgdir/interfaces
+ local mainfn=$cfgdir/interfaces
+ local fnlist=( $mainfn )
+
+ local dirlist=(`awk '/^[ \t]*source/{print $2}' $mainfn`)
+
+ local i
+ for i in "${dirlist[@]}"
+ do
+ fnlist+=(`ls $i 2>/dev/null`)
+ done
+
local tmpfl=$(mktemp)
local nic_start='^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+'$1
local nic_end='^[ \t]*(auto|iface|mapping|allow-.*|source)'
- awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" $fn >$tmpfl
+ local fn
+ for fn in "${fnlist[@]}"
+ do
+ awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" \
+ $fn >$tmpfl
- cp $tmpfl $fn
+ cp $tmpfl $fn
+ done
rm $tmpfl
}