Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/nfit/core.c | 48
-rw-r--r-- drivers/base/power/runtime.c | 4
-rw-r--r-- drivers/base/regmap/regmap.c | 6
-rw-r--r-- drivers/bluetooth/Kconfig | 23
-rw-r--r-- drivers/bluetooth/Makefile | 2
-rw-r--r-- drivers/bluetooth/bcm203x.c | 4
-rw-r--r-- drivers/bluetooth/btqca.c | 8
-rw-r--r-- drivers/bluetooth/btqcomsmd.c | 182
-rw-r--r-- drivers/bluetooth/btrtl.c | 107
-rw-r--r-- drivers/bluetooth/btusb.c | 14
-rw-r--r-- drivers/bluetooth/btwilink.c | 4
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 2
-rw-r--r-- drivers/bluetooth/hci_bcsp.c | 128
-rw-r--r-- drivers/bluetooth/hci_intel.c | 6
-rw-r--r-- drivers/bluetooth/hci_ldisc.c | 34
-rw-r--r-- drivers/bluetooth/hci_mrvl.c | 387
-rw-r--r-- drivers/bluetooth/hci_qca.c | 2
-rw-r--r-- drivers/bluetooth/hci_uart.h | 9
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 16
-rw-r--r-- drivers/clk/sunxi-ng/ccu_nk.c | 6
-rw-r--r-- drivers/clk/sunxi/clk-a10-pll2.c | 4
-rw-r--r-- drivers/clk/sunxi/clk-sun8i-mbus.c | 2
-rw-r--r-- drivers/crypto/chelsio/chcr_core.c | 10
-rw-r--r-- drivers/firmware/efi/efi.c | 7
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 169
-rw-r--r-- drivers/firmware/efi/libstub/fdt.c | 54
-rw-r--r-- drivers/firmware/efi/libstub/random.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 10
-rw-r--r-- drivers/gpu/drm/drm_ioc32.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 29
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 29
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 35
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 26
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 9
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_validate_shaders.c | 10
-rw-r--r-- drivers/i2c/busses/i2c-eg20t.c | 18
-rw-r--r-- drivers/i2c/busses/i2c-qup.c | 3
-rw-r--r-- drivers/i2c/muxes/i2c-mux-pca954x.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 6
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 9
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r-- drivers/infiniband/hw/mlx4/mad.c | 23
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 3
-rw-r--r-- drivers/infiniband/hw/mlx4/mcg.c | 14
-rw-r--r-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 37
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 11
-rw-r--r-- drivers/infiniband/sw/rdmavt/mr.c | 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.c | 25
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_comp.c | 13
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c | 55
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.h | 5
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_recv.c | 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_req.c | 57
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_resp.c | 11
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 9
-rw-r--r-- drivers/input/joydev.c | 6
-rw-r--r-- drivers/input/touchscreen/silead.c | 16
-rw-r--r-- drivers/irqchip/irq-atmel-aic.c | 5
-rw-r--r-- drivers/irqchip/irq-atmel-aic5.c | 5
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 7
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 105
-rw-r--r-- drivers/md/md.c | 12
-rw-r--r-- drivers/md/raid5-cache.c | 46
-rw-r--r-- drivers/md/raid5.c | 14
-rw-r--r-- drivers/media/cec-edid.c | 5
-rw-r--r-- drivers/media/pci/cx23885/cx23885-417.c | 1
-rw-r--r-- drivers/media/pci/saa7134/saa7134-dvb.c | 1
-rw-r--r-- drivers/media/pci/saa7134/saa7134-empress.c | 1
-rw-r--r-- drivers/media/platform/Kconfig | 2
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h | 1
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c | 42
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c | 6
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h | 1
-rw-r--r-- drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c | 16
-rw-r--r-- drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c | 16
-rw-r--r-- drivers/media/platform/rcar-fcp.c | 8
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 14
-rw-r--r-- drivers/mmc/host/dw_mmc.h | 3
-rw-r--r-- drivers/mmc/host/omap.c | 18
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 16
-rw-r--r-- drivers/mmc/host/sdhci-st.c | 15
-rw-r--r-- drivers/mtd/nand/davinci_nand.c | 3
-rw-r--r-- drivers/mtd/nand/mtk_ecc.c | 12
-rw-r--r-- drivers/mtd/nand/mtk_nand.c | 7
-rw-r--r-- drivers/mtd/nand/mxc_nand.c | 2
-rw-r--r-- drivers/mtd/nand/omap2.c | 2
-rw-r--r-- drivers/net/Kconfig | 2
-rw-r--r-- drivers/net/can/dev.c | 27
-rw-r--r-- drivers/net/can/flexcan.c | 13
-rw-r--r-- drivers/net/can/ifi_canfd/ifi_canfd.c | 11
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 37
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Makefile | 1
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 1255
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.c | 34
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.h | 23
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.c | 86
-rw-r--r-- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 155
-rw-r--r-- drivers/net/dsa/qca8k.c | 24
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 7
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 90
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 8
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 18
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 116
-rw-r--r-- drivers/net/ethernet/broadcom/b44.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 79
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 79
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 123
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 135
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 22
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 187
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 1251
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 90
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 179
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 24
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 122
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 57
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 14
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 30
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/Makefile | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 80
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 135
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 721
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 48
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 981
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 483
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h | 57
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 294
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 248
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 57
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 18
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 15
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 9
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 9
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 12
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 99
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.h | 8
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 27
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hisi_femac.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 132
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 34
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 87
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 87
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 139
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 187
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 26
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 58
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 206
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 34
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 10
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 11
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 7
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 29
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 51
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 10
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 603
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 111
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 81
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 131
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/srq.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 70
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 515
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 407
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 109
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 65
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 242
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 135
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 210
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 478
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/Makefile | 7
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 233
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_bpf.h | 202
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c | 1813
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c | 171
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 47
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 142
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 51
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_offload.c | 294
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 43
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 7
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 274
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 1792
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 316
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 67
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 30
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 2954
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.h | 216
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/Makefile | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 41
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_roce.c | 314
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 5
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-phy.c | 37
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 115
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 26
-rw-r--r-- drivers/net/ethernet/renesas/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r-- drivers/net/ethernet/rocker/rocker.h | 15
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 120
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 115
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 14
-rw-r--r-- drivers/net/ethernet/sfc/sriov.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/sriov.h | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 2
-rw-r--r-- drivers/net/hamradio/6pack.c | 12
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 14
-rw-r--r-- drivers/net/hyperv/netvsc.c | 4
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 188
-rw-r--r-- drivers/net/ieee802154/fakelb.c | 14
-rw-r--r-- drivers/net/ipvlan/ipvlan.h | 6
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 94
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 87
-rw-r--r-- drivers/net/phy/mdio-xgene.c | 6
-rw-r--r-- drivers/net/phy/microchip.c | 2
-rw-r--r-- drivers/net/phy/mscc.c | 176
-rw-r--r-- drivers/net/usb/r8152.c | 281
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/ahb.c | 107
-rw-r--r-- drivers/net/wireless/ath/ath10k/ce.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 17
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 8
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.h | 8
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.h | 5
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/targaddrs.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.h | 32
-rw-r--r-- drivers/net/wireless/ath/ath6kl/wmi.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/Kconfig | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 46
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 55
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c | 17
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/ucode.c | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 17
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 15
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-a000.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 57
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 29
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw.h | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-io.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 104
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 20
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 20
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 29
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 184
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h | 21
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h | 20
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 85
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 74
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 152
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 47
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 42
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 18
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 93
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 80
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 15
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 38
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 88
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 51
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 233
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 24
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 128
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 25
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 325
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 397
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 24
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/fw.h | 1
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/ioctl.h | 1
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 5
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 13
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/usb.c | 37
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 7
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 144
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 49
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pci.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/ps.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c | 7
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c | 7
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 15
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 20
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 208
-rw-r--r-- drivers/net/wireless/ti/wl18xx/main.c | 19
-rw-r--r-- drivers/net/wireless/ti/wlcore/boot.c | 15
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 36
-rw-r--r-- drivers/net/wireless/ti/wlcore/sdio.c | 76
-rw-r--r-- drivers/net/wireless/ti/wlcore/spi.c | 48
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore_i.h | 12
-rw-r--r-- drivers/net/xen-netback/common.h | 8
-rw-r--r-- drivers/net/xen-netback/hash.c | 68
-rw-r--r-- drivers/net/xen-netback/interface.c | 38
-rw-r--r-- drivers/net/xen-netback/netback.c | 18
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 83
-rw-r--r-- drivers/net/xen-netfront.c | 15
-rw-r--r-- drivers/nvdimm/core.c | 8
-rw-r--r-- drivers/nvdimm/nd.h | 22
-rw-r--r-- drivers/nvdimm/region_devs.c | 22
-rw-r--r-- drivers/nvme/host/pci.c | 9
-rw-r--r-- drivers/nvme/host/rdma.c | 144
-rw-r--r-- drivers/pci/remove.c | 3
-rw-r--r-- drivers/pcmcia/ds.c | 12
-rw-r--r-- drivers/pcmcia/pxa2xx_base.c | 9
-rw-r--r-- drivers/pcmcia/pxa2xx_base.h | 2
-rw-r--r-- drivers/pcmcia/sa1111_badge4.c | 22
-rw-r--r-- drivers/pcmcia/sa1111_generic.c | 22
-rw-r--r-- drivers/pcmcia/sa1111_jornada720.c | 25
-rw-r--r-- drivers/pcmcia/sa1111_lubbock.c | 32
-rw-r--r-- drivers/pcmcia/sa1111_neponset.c | 26
-rw-r--r-- drivers/pcmcia/sa11xx_base.c | 8
-rw-r--r-- drivers/pcmcia/soc_common.c | 2
-rw-r--r-- drivers/ptp/ptp_clock.c | 1
-rw-r--r-- drivers/rapidio/rio_cm.c | 19
-rw-r--r-- drivers/s390/net/qeth_core.h | 1
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 32
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 6
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 29
-rw-r--r-- drivers/s390/net/qeth_l3_sys.c | 5
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 3
-rw-r--r-- drivers/scsi/hosts.c | 2
-rw-r--r-- drivers/scsi/scsi.c | 1
-rw-r--r-- drivers/scsi/scsi_priv.h | 1
-rw-r--r-- drivers/staging/media/cec/TODO | 1
-rw-r--r-- drivers/staging/media/cec/cec-adap.c | 26
-rw-r--r-- drivers/staging/media/cec/cec-api.c | 12
-rw-r--r-- drivers/staging/media/cec/cec-core.c | 27
-rw-r--r-- drivers/staging/media/pulse8-cec/pulse8-cec.c | 10
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_main.c | 3
-rw-r--r-- drivers/usb/core/config.c | 28
-rw-r--r-- drivers/usb/musb/Kconfig | 2
-rw-r--r-- drivers/usb/serial/usb-serial-simple.c | 3
473 files changed, 22752 insertions, 7042 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 80cc7c089a15..e1d5ea6d5e40 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -94,54 +94,50 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
return to_acpi_device(acpi_desc->dev);
}
-static int xlat_status(void *buf, unsigned int cmd)
+static int xlat_status(void *buf, unsigned int cmd, u32 status)
{
struct nd_cmd_clear_error *clear_err;
struct nd_cmd_ars_status *ars_status;
- struct nd_cmd_ars_start *ars_start;
- struct nd_cmd_ars_cap *ars_cap;
u16 flags;
switch (cmd) {
case ND_CMD_ARS_CAP:
- ars_cap = buf;
- if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+ if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
return -ENOTTY;
/* Command failed */
- if (ars_cap->status & 0xffff)
+ if (status & 0xffff)
return -EIO;
/* No supported scan types for this range */
flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
- if ((ars_cap->status >> 16 & flags) == 0)
+ if ((status >> 16 & flags) == 0)
return -ENOTTY;
break;
case ND_CMD_ARS_START:
- ars_start = buf;
/* ARS is in progress */
- if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+ if ((status & 0xffff) == NFIT_ARS_START_BUSY)
return -EBUSY;
/* Command failed */
- if (ars_start->status & 0xffff)
+ if (status & 0xffff)
return -EIO;
break;
case ND_CMD_ARS_STATUS:
ars_status = buf;
/* Command failed */
- if (ars_status->status & 0xffff)
+ if (status & 0xffff)
return -EIO;
/* Check extended status (Upper two bytes) */
- if (ars_status->status == NFIT_ARS_STATUS_DONE)
+ if (status == NFIT_ARS_STATUS_DONE)
return 0;
/* ARS is in progress */
- if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+ if (status == NFIT_ARS_STATUS_BUSY)
return -EBUSY;
/* No ARS performed for the current boot */
- if (ars_status->status == NFIT_ARS_STATUS_NONE)
+ if (status == NFIT_ARS_STATUS_NONE)
return -EAGAIN;
/*
@@ -149,19 +145,19 @@ static int xlat_status(void *buf, unsigned int cmd)
* agent wants the scan to stop. If we didn't overflow
* then just continue with the returned results.
*/
- if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+ if (status == NFIT_ARS_STATUS_INTR) {
if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
return -ENOSPC;
return 0;
}
/* Unknown status */
- if (ars_status->status >> 16)
+ if (status >> 16)
return -EIO;
break;
case ND_CMD_CLEAR_ERROR:
clear_err = buf;
- if (clear_err->status & 0xffff)
+ if (status & 0xffff)
return -EIO;
if (!clear_err->cleared)
return -EIO;
@@ -172,6 +168,9 @@ static int xlat_status(void *buf, unsigned int cmd)
break;
}
+ /* all other non-zero status values result in an error */
+ if (status)
+ return -EIO;
return 0;
}
@@ -186,10 +185,10 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
struct nd_cmd_pkg *call_pkg = NULL;
const char *cmd_name, *dimm_name;
unsigned long cmd_mask, dsm_mask;
+ u32 offset, fw_status = 0;
acpi_handle handle;
unsigned int func;
const u8 *uuid;
- u32 offset;
int rc, i;
func = cmd;
@@ -317,6 +316,15 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
out_obj->buffer.pointer + offset, out_size);
offset += out_size;
}
+
+ /*
+ * Set fw_status for all the commands with a known format to be
+ * later interpreted by xlat_status().
+ */
+ if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
+ || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+ fw_status = *(u32 *) out_obj->buffer.pointer;
+
if (offset + in_buf.buffer.length < buf_len) {
if (i >= 1) {
/*
@@ -325,7 +333,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
*/
rc = buf_len - offset - in_buf.buffer.length;
if (cmd_rc)
- *cmd_rc = xlat_status(buf, cmd);
+ *cmd_rc = xlat_status(buf, cmd, fw_status);
} else {
dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
__func__, dimm_name, cmd_name, buf_len,
@@ -335,7 +343,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
} else {
rc = 0;
if (cmd_rc)
- *cmd_rc = xlat_status(buf, cmd);
+ *cmd_rc = xlat_status(buf, cmd, fw_status);
}
out:
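The hunks above decouple firmware status extraction from the command payload: acpi_nfit_ctl() now reads the first u32 of the returned buffer into fw_status once, and xlat_status() interprets that word instead of dereferencing per-command structs. A minimal sketch of the status-word layout this relies on (helper names are hypothetical, not part of the patch; the masks match those used in xlat_status()):

	/* low 16 bits = command status code, upper 16 bits = extended status */
	static inline u16 dsm_cmd_status(u32 status)
	{
		return status & 0xffff;	/* non-zero: command failed or busy */
	}

	static inline u16 dsm_ext_status(u32 status)
	{
		return status >> 16;	/* e.g. supported ARS scan types */
	}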
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 17995fadebd7..82a081ea4317 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- trace_rpm_suspend(dev, rpmflags);
+ trace_rpm_suspend_rcuidle(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- trace_rpm_return_int(dev, _THIS_IP_, retval);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 25d26bb18970..e964d068874d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1475,7 +1475,11 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
kfree(buf);
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
- regcache_drop_region(map, reg, reg + 1);
+ /* regcache_drop_region() takes lock that we already have,
+ * thus call map->cache_ops->drop() directly
+ */
+ if (map->cache_ops && map->cache_ops->drop)
+ map->cache_ops->drop(map, reg, reg + 1);
}
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
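Context for the hunk above: _regmap_raw_write() already runs with the regmap lock held, and regcache_drop_region() re-acquires that same lock, so calling it here would self-deadlock. Sketched call graph (illustrative, not actual regmap code):

	_regmap_raw_write()            /* map->lock already held */
	    -> regcache_drop_region()  /* takes map->lock again: deadlock */

Invoking map->cache_ops->drop() directly performs the same cache invalidation without the second lock acquisition.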
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index cf50fd2e96df..3cc9bff9d99d 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -180,6 +180,17 @@ config BT_HCIUART_AG6XX
Say Y here to compile support for Intel AG6XX protocol.
+config BT_HCIUART_MRVL
+ bool "Marvell protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ help
+ Marvell is a serial protocol for communication between the Bluetooth
+ device and the host. This protocol is required for most Marvell
+ Bluetooth devices with a UART interface.
+
+ Say Y here to compile support for HCI MRVL protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
@@ -331,4 +342,16 @@ config BT_WILINK
Say Y here to compile support for Texas Instruments' WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_QCOMSMD
+ tristate "Qualcomm SMD based HCI support"
+ depends on QCOM_SMD && QCOM_WCNSS_CTRL
+ select BT_QCA
+ help
+ Qualcomm SMD based HCI driver.
+ This driver bridges HCI data over the shared memory channels
+ to the WCNSS core.
+
+ Say Y here to compile support for HCI over Qualcomm SMD into the
+ kernel or say M to compile as a module.
+
endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 9c18939fc5c9..b1fc29a697b7 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
obj-$(CONFIG_BT_QCA) += btqca.o
@@ -37,6 +38,7 @@ hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
+hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 5b0ef7bbe8ac..5ce6d4176dc3 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -185,10 +185,8 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
data->state = BCM203X_LOAD_MINIDRV;
data->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!data->urb) {
- BT_ERR("Can't allocate URB");
+ if (!data->urb)
return -ENOMEM;
- }
if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
BT_ERR("Mini driver request failed");
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 4a6208168850..28afd5d585f9 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -55,8 +55,8 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl || !edl->data) {
- BT_ERR("%s: TLV with no header or no data", hdev->name);
+ if (!edl) {
+ BT_ERR("%s: TLV with no header", hdev->name);
err = -EILSEQ;
goto out;
}
@@ -224,8 +224,8 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl || !edl->data) {
- BT_ERR("%s: TLV with no header or no data", hdev->name);
+ if (!edl) {
+ BT_ERR("%s: TLV with no header", hdev->name);
err = -EILSEQ;
goto out;
}
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
new file mode 100644
index 000000000000..08c2c93887c1
--- /dev/null
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
+#include <linux/platform_device.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+struct btqcomsmd {
+ struct hci_dev *hdev;
+
+ struct qcom_smd_channel *acl_channel;
+ struct qcom_smd_channel *cmd_channel;
+};
+
+static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type,
+ const void *data, size_t count)
+{
+ struct sk_buff *skb;
+
+ /* Use GFP_ATOMIC as we're in IRQ context */
+ skb = bt_skb_alloc(count, GFP_ATOMIC);
+ if (!skb) {
+ hdev->stat.err_rx++;
+ return -ENOMEM;
+ }
+
+ hci_skb_pkt_type(skb) = type;
+ memcpy(skb_put(skb, count), data, count);
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int btqcomsmd_acl_callback(struct qcom_smd_channel *channel,
+ const void *data, size_t count)
+{
+ struct btqcomsmd *btq = qcom_smd_get_drvdata(channel);
+
+ btq->hdev->stat.byte_rx += count;
+ return btqcomsmd_recv(btq->hdev, HCI_ACLDATA_PKT, data, count);
+}
+
+static int btqcomsmd_cmd_callback(struct qcom_smd_channel *channel,
+ const void *data, size_t count)
+{
+ struct btqcomsmd *btq = qcom_smd_get_drvdata(channel);
+
+ return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count);
+}
+
+static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btqcomsmd *btq = hci_get_drvdata(hdev);
+ int ret;
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_ACLDATA_PKT:
+ ret = qcom_smd_send(btq->acl_channel, skb->data, skb->len);
+ hdev->stat.acl_tx++;
+ hdev->stat.byte_tx += skb->len;
+ break;
+ case HCI_COMMAND_PKT:
+ ret = qcom_smd_send(btq->cmd_channel, skb->data, skb->len);
+ hdev->stat.cmd_tx++;
+ break;
+ default:
+ ret = -EILSEQ;
+ break;
+ }
+
+ kfree_skb(skb);
+
+ return ret;
+}
+
+static int btqcomsmd_open(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int btqcomsmd_close(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int btqcomsmd_probe(struct platform_device *pdev)
+{
+ struct btqcomsmd *btq;
+ struct hci_dev *hdev;
+ void *wcnss;
+ int ret;
+
+ btq = devm_kzalloc(&pdev->dev, sizeof(*btq), GFP_KERNEL);
+ if (!btq)
+ return -ENOMEM;
+
+ wcnss = dev_get_drvdata(pdev->dev.parent);
+
+ btq->acl_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_ACL",
+ btqcomsmd_acl_callback);
+ if (IS_ERR(btq->acl_channel))
+ return PTR_ERR(btq->acl_channel);
+
+ btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
+ btqcomsmd_cmd_callback);
+ if (IS_ERR(btq->cmd_channel))
+ return PTR_ERR(btq->cmd_channel);
+
+ qcom_smd_set_drvdata(btq->acl_channel, btq);
+ qcom_smd_set_drvdata(btq->cmd_channel, btq);
+
+ hdev = hci_alloc_dev();
+ if (!hdev)
+ return -ENOMEM;
+
+ hci_set_drvdata(hdev, btq);
+ btq->hdev = hdev;
+ SET_HCIDEV_DEV(hdev, &pdev->dev);
+
+ hdev->bus = HCI_SMD;
+ hdev->open = btqcomsmd_open;
+ hdev->close = btqcomsmd_close;
+ hdev->send = btqcomsmd_send;
+ hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+ ret = hci_register_dev(hdev);
+ if (ret < 0) {
+ hci_free_dev(hdev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, btq);
+
+ return 0;
+}
+
+static int btqcomsmd_remove(struct platform_device *pdev)
+{
+ struct btqcomsmd *btq = platform_get_drvdata(pdev);
+
+ hci_unregister_dev(btq->hdev);
+ hci_free_dev(btq->hdev);
+
+ return 0;
+}
+
+static const struct of_device_id btqcomsmd_of_match[] = {
+ { .compatible = "qcom,wcnss-bt", },
+ { },
+};
+
+static struct platform_driver btqcomsmd_driver = {
+ .probe = btqcomsmd_probe,
+ .remove = btqcomsmd_remove,
+ .driver = {
+ .name = "btqcomsmd",
+ .of_match_table = btqcomsmd_of_match,
+ },
+};
+
+module_platform_driver(btqcomsmd_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD HCI driver");
+MODULE_LICENSE("GPL v2");
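For orientation, the new driver splits HCI traffic across the two WCNSS SMD channels opened in btqcomsmd_probe(); the routing implemented above, summarized as a sketch:

	/* Channel routing in btqcomsmd (summary of the code above):
	 *
	 *   HCI_COMMAND_PKT  --TX-->   "APPS_RIVA_BT_CMD"
	 *   HCI_EVENT_PKT    <--RX--   "APPS_RIVA_BT_CMD" (btqcomsmd_cmd_callback)
	 *   HCI_ACLDATA_PKT  <-TX/RX-> "APPS_RIVA_BT_ACL" (btqcomsmd_acl_callback)
	 *
	 * Any other packet type (e.g. SCO) is rejected with -EILSEQ in
	 * btqcomsmd_send().
	 */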
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 84288938f7f2..fc9b25703c67 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -33,6 +33,7 @@
#define RTL_ROM_LMP_8723B 0x8723
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
+#define RTL_ROM_LMP_8822B 0x8822
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{
@@ -78,11 +79,15 @@ static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
const unsigned char *patch_length_base, *patch_offset_base;
u32 patch_offset = 0;
u16 patch_length, num_patches;
- const u16 project_id_to_lmp_subver[] = {
- RTL_ROM_LMP_8723A,
- RTL_ROM_LMP_8723B,
- RTL_ROM_LMP_8821A,
- RTL_ROM_LMP_8761A
+ static const struct {
+ __u16 lmp_subver;
+ __u8 id;
+ } project_id_to_lmp_subver[] = {
+ { RTL_ROM_LMP_8723A, 0 },
+ { RTL_ROM_LMP_8723B, 1 },
+ { RTL_ROM_LMP_8821A, 2 },
+ { RTL_ROM_LMP_8761A, 3 },
+ { RTL_ROM_LMP_8822B, 8 },
};
ret = rtl_read_rom_version(hdev, &rom_version);
@@ -134,14 +139,20 @@ static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
return -EINVAL;
}
- if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+ /* Find project_id in table */
+ for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) {
+ if (project_id == project_id_to_lmp_subver[i].id)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
BT_ERR("%s: unknown project id %d", hdev->name, project_id);
return -EINVAL;
}
- if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+ if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) {
BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
- project_id_to_lmp_subver[project_id], lmp_subver);
+ project_id_to_lmp_subver[i].lmp_subver, lmp_subver);
return -EINVAL;
}
@@ -257,6 +268,26 @@ out:
return ret;
}
+static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
+{
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: rtl: loading %s", hdev->name, name);
+ ret = request_firmware(&fw, name, &hdev->dev);
+ if (ret < 0) {
+ BT_ERR("%s: Failed to load %s", hdev->name, name);
+ return ret;
+ }
+
+ ret = fw->size;
+ *buff = kmemdup(fw->data, ret, GFP_KERNEL);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
{
const struct firmware *fw;
@@ -296,25 +327,74 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
unsigned char *fw_data = NULL;
const struct firmware *fw;
int ret;
+ int cfg_sz;
+ u8 *cfg_buff = NULL;
+ u8 *tbuff;
+ char *cfg_name = NULL;
+
+ switch (lmp_subver) {
+ case RTL_ROM_LMP_8723B:
+ cfg_name = "rtl_bt/rtl8723b_config.bin";
+ break;
+ case RTL_ROM_LMP_8821A:
+ cfg_name = "rtl_bt/rtl8821a_config.bin";
+ break;
+ case RTL_ROM_LMP_8761A:
+ cfg_name = "rtl_bt/rtl8761a_config.bin";
+ break;
+ case RTL_ROM_LMP_8822B:
+ cfg_name = "rtl_bt/rtl8822b_config.bin";
+ break;
+ default:
+ BT_ERR("%s: rtl: no config according to lmp_subver %04x",
+ hdev->name, lmp_subver);
+ break;
+ }
+
+ if (cfg_name) {
+ cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff);
+ if (cfg_sz < 0)
+ cfg_sz = 0;
+ } else
+ cfg_sz = 0;
BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
ret = request_firmware(&fw, fw_name, &hdev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
- return ret;
+ goto err_req_fw;
}
ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
if (ret < 0)
goto out;
+ if (cfg_sz) {
+ tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL);
+ if (!tbuff) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(tbuff, fw_data, ret);
+ kfree(fw_data);
+
+ memcpy(tbuff + ret, cfg_buff, cfg_sz);
+ ret += cfg_sz;
+
+ fw_data = tbuff;
+ }
+
+ BT_INFO("cfg_sz %d, total size %d", cfg_sz, ret);
+
ret = rtl_download_firmware(hdev, fw_data, ret);
- kfree(fw_data);
- if (ret < 0)
- goto out;
out:
release_firmware(fw);
+ kfree(fw_data);
+err_req_fw:
+ if (cfg_sz)
+ kfree(cfg_buff);
return ret;
}
@@ -377,6 +457,9 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
case RTL_ROM_LMP_8761A:
return btrtl_setup_rtl8723b(hdev, lmp_subver,
"rtl_bt/rtl8761a_fw.bin");
+ case RTL_ROM_LMP_8822B:
+ return btrtl_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8822b_fw.bin");
default:
BT_INFO("rtl: assuming no firmware upload needed.");
return 0;
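The btrtl change above makes btrtl_setup_rtl8723b() append an optional per-chip config blob to the parsed firmware patch before download. The buffer handed to rtl_download_firmware() ends up laid out as follows (a recap of the allocation in the hunk; sizes are runtime values):

	/* fw_data after concatenation:
	 *   [ parsed firmware patch: ret bytes | config blob: cfg_sz bytes ]
	 */
	tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL);
	memcpy(tbuff, fw_data, ret);		/* patch first */
	memcpy(tbuff + ret, cfg_buff, cfg_sz);	/* config appended */
	ret += cfg_sz;				/* total download length */

A missing config file is non-fatal: a negative return from rtl_load_config() just leaves cfg_sz at 0 and the bare patch is downloaded.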
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 811f9b97e360..6bd63b84abd0 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -62,6 +62,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_REALTEK 0x20000
#define BTUSB_BCM2045 0x40000
#define BTUSB_IFNUM_2 0x80000
+#define BTUSB_CW6622 0x100000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -248,9 +249,11 @@ static const struct usb_device_id blacklist_table[] = {
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -290,7 +293,8 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC },
/* CONWISE Technology based adapters with buggy SCO support */
- { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
+ { USB_DEVICE(0x0e5e, 0x6622),
+ .driver_info = BTUSB_BROKEN_ISOC | BTUSB_CW6622},
/* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
{ USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE },
@@ -2221,9 +2225,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
- if (err == 1) {
+ if (err == -EINTR) {
BT_ERR("%s: Firmware loading interrupted", hdev->name);
- err = -EINTR;
goto done;
}
@@ -2275,7 +2278,7 @@ done:
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
BT_ERR("%s: Device boot interrupted", hdev->name);
return -EINTR;
}
@@ -2845,6 +2848,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
+ if (id->driver_info & BTUSB_CW6622)
+ set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+
if (id->driver_info & BTUSB_BCM2045)
set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 485281b3f167..ef51c9c864c5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -245,6 +245,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
struct ti_st *hst;
long len;
+ int pkt_type;
hst = hci_get_drvdata(hdev);
@@ -258,6 +259,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
* Freeing skb memory is taken care in shared transport layer,
* so don't free skb memory here.
*/
+ pkt_type = hci_skb_pkt_type(skb);
len = hst->st_write(skb);
if (len < 0) {
kfree_skb(skb);
@@ -268,7 +270,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* ST accepted our skb. So, Go ahead and do rest */
hdev->stat.byte_tx += len;
- ti_st_tx_complete(hst, hci_skb_pkt_type(skb));
+ ti_st_tx_complete(hst, pkt_type);
return 0;
}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 1c97eda8bae3..5ccb90ef0146 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -798,7 +798,7 @@ static int bcm_remove(struct platform_device *pdev)
static const struct hci_uart_proto bcm_proto = {
.id = HCI_UART_BCM,
- .name = "BCM",
+ .name = "Broadcom",
.manufacturer = 15,
.init_speed = 115200,
.oper_speed = 4000000,
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index d7d23ceba4d1..a2c921faaa12 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -90,7 +90,8 @@ struct bcsp_struct {
/* ---- BCSP CRC calculation ---- */
/* Table for calculating CRC for polynomial 0x1021, LSB processed first,
-initial value 0xffff, bits shifted in reverse order. */
+ * initial value 0xffff, bits shifted in reverse order.
+ */
static const u16 crc_table[] = {
0x0000, 0x1081, 0x2102, 0x3183,
@@ -174,7 +175,7 @@ static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
}
static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
- int len, int pkt_type)
+ int len, int pkt_type)
{
struct sk_buff *nskb;
u8 hdr[4], chan;
@@ -213,6 +214,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
/* Vendor specific commands */
if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) {
u8 desc = *(data + HCI_COMMAND_HDR_SIZE);
+
if ((desc & 0xf0) == 0xc0) {
data += HCI_COMMAND_HDR_SIZE + 1;
len -= HCI_COMMAND_HDR_SIZE + 1;
@@ -271,8 +273,8 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
/* Put CRC */
if (bcsp->use_crc) {
bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc);
- bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff));
- bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff));
+ bcsp_slip_one_byte(nskb, (u8)((bcsp_txmsg_crc >> 8) & 0x00ff));
+ bcsp_slip_one_byte(nskb, (u8)(bcsp_txmsg_crc & 0x00ff));
}
bcsp_slip_msgdelim(nskb);
@@ -287,7 +289,8 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
struct sk_buff *skb;
/* First of all, check for unreliable messages in the queue,
- since they have priority */
+ * since they have priority
+ */
skb = skb_dequeue(&bcsp->unrel);
if (skb != NULL) {
@@ -414,7 +417,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu)
/* spot "conf" pkts and reply with a "conf rsp" pkt */
if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 &&
- !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) {
+ !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) {
struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC);
BT_DBG("Found a LE conf pkt");
@@ -428,7 +431,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu)
}
/* Spot "sync" pkts. If we find one...disaster! */
else if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 &&
- !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) {
+ !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) {
BT_ERR("Found a LE sync pkt, card has reset");
}
}
@@ -446,7 +449,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
default:
memcpy(skb_put(bcsp->rx_skb, 1), &byte, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
- bcsp->rx_state != BCSP_W4_CRC)
+ bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, byte);
bcsp->rx_count--;
}
@@ -457,7 +460,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
case 0xdc:
memcpy(skb_put(bcsp->rx_skb, 1), &c0, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
- bcsp->rx_state != BCSP_W4_CRC)
+ bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, 0xc0);
bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
bcsp->rx_count--;
@@ -466,7 +469,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
case 0xdd:
memcpy(skb_put(bcsp->rx_skb, 1), &db, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
- bcsp->rx_state != BCSP_W4_CRC)
+ bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, 0xdb);
bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
bcsp->rx_count--;
@@ -485,13 +488,28 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
static void bcsp_complete_rx_pkt(struct hci_uart *hu)
{
struct bcsp_struct *bcsp = hu->priv;
- int pass_up;
+ int pass_up = 0;
if (bcsp->rx_skb->data[0] & 0x80) { /* reliable pkt */
BT_DBG("Received seqno %u from card", bcsp->rxseq_txack);
- bcsp->rxseq_txack++;
- bcsp->rxseq_txack %= 0x8;
- bcsp->txack_req = 1;
+
+ /* check the rx sequence number is as expected */
+ if ((bcsp->rx_skb->data[0] & 0x07) == bcsp->rxseq_txack) {
+ bcsp->rxseq_txack++;
+ bcsp->rxseq_txack %= 0x8;
+ } else {
+ /* handle re-transmitted packet or
+ * when packet was missed
+ */
+ BT_ERR("Out-of-order packet arrived, got %u expected %u",
+ bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);
+
+ /* do not process out-of-order packet payload */
+ pass_up = 2;
+ }
+
+ /* send current txack value to all received reliable packets */
+ bcsp->txack_req = 1;
/* If needed, transmit an ack pkt */
hci_uart_tx_wakeup(hu);
@@ -500,26 +518,33 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07;
BT_DBG("Request for pkt %u from card", bcsp->rxack);
+ /* handle received ACK indications,
+ * including those from out-of-order packets
+ */
bcsp_pkt_cull(bcsp);
- if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
- bcsp->rx_skb->data[0] & 0x80) {
- hci_skb_pkt_type(bcsp->rx_skb) = HCI_ACLDATA_PKT;
- pass_up = 1;
- } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
- bcsp->rx_skb->data[0] & 0x80) {
- hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT;
- pass_up = 1;
- } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
- hci_skb_pkt_type(bcsp->rx_skb) = HCI_SCODATA_PKT;
- pass_up = 1;
- } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
- !(bcsp->rx_skb->data[0] & 0x80)) {
- bcsp_handle_le_pkt(hu);
- pass_up = 0;
- } else
- pass_up = 0;
-
- if (!pass_up) {
+
+ if (pass_up != 2) {
+ if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
+ (bcsp->rx_skb->data[0] & 0x80)) {
+ hci_skb_pkt_type(bcsp->rx_skb) = HCI_ACLDATA_PKT;
+ pass_up = 1;
+ } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
+ (bcsp->rx_skb->data[0] & 0x80)) {
+ hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT;
+ pass_up = 1;
+ } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
+ hci_skb_pkt_type(bcsp->rx_skb) = HCI_SCODATA_PKT;
+ pass_up = 1;
+ } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
+ !(bcsp->rx_skb->data[0] & 0x80)) {
+ bcsp_handle_le_pkt(hu);
+ pass_up = 0;
+ } else {
+ pass_up = 0;
+ }
+ }
+
+ if (pass_up == 0) {
struct hci_event_hdr hdr;
u8 desc = (bcsp->rx_skb->data[1] & 0x0f);
@@ -537,18 +562,23 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
hci_recv_frame(hu->hdev, bcsp->rx_skb);
} else {
BT_ERR("Packet for unknown channel (%u %s)",
- bcsp->rx_skb->data[1] & 0x0f,
- bcsp->rx_skb->data[0] & 0x80 ?
- "reliable" : "unreliable");
+ bcsp->rx_skb->data[1] & 0x0f,
+ bcsp->rx_skb->data[0] & 0x80 ?
+ "reliable" : "unreliable");
kfree_skb(bcsp->rx_skb);
}
} else
kfree_skb(bcsp->rx_skb);
- } else {
+ } else if (pass_up == 1) {
/* Pull out BCSP hdr */
skb_pull(bcsp->rx_skb, 4);
hci_recv_frame(hu->hdev, bcsp->rx_skb);
+ } else {
+ /* ignore packet payload of already ACKed re-transmitted
+ * packets or when a packet was missed in the BCSP window
+ */
+ kfree_skb(bcsp->rx_skb);
}
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -567,7 +597,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
const unsigned char *ptr;
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
- hu, count, bcsp->rx_state, bcsp->rx_count);
+ hu, count, bcsp->rx_state, bcsp->rx_count);
ptr = data;
while (count) {
@@ -586,24 +616,14 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
switch (bcsp->rx_state) {
case BCSP_W4_BCSP_HDR:
- if ((0xff & (u8) ~ (bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] +
- bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
+ if ((0xff & (u8)~(bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] +
+ bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
BT_ERR("Error in BCSP hdr checksum");
kfree_skb(bcsp->rx_skb);
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
}
- if (bcsp->rx_skb->data[0] & 0x80 /* reliable pkt */
- && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
- BT_ERR("Out-of-order packet arrived, got %u expected %u",
- bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);
-
- kfree_skb(bcsp->rx_skb);
- bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
- bcsp->rx_count = 0;
- continue;
- }
bcsp->rx_state = BCSP_W4_DATA;
bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) +
(bcsp->rx_skb->data[2] << 4); /* May be 0 */
@@ -620,8 +640,8 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
case BCSP_W4_CRC:
if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) {
BT_ERR("Checksum failed: computed %04x received %04x",
- bitrev16(bcsp->message_crc),
- bscp_get_crc(bcsp));
+ bitrev16(bcsp->message_crc),
+ bscp_get_crc(bcsp));
kfree_skb(bcsp->rx_skb);
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -679,7 +699,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
/* Arrange to retransmit all messages in the relq. */
static void bcsp_timed_event(unsigned long arg)
{
- struct hci_uart *hu = (struct hci_uart *) arg;
+ struct hci_uart *hu = (struct hci_uart *)arg;
struct bcsp_struct *bcsp = hu->priv;
struct sk_buff *skb;
unsigned long flags;
@@ -715,7 +735,7 @@ static int bcsp_open(struct hci_uart *hu)
init_timer(&bcsp->tbcsp);
bcsp->tbcsp.function = bcsp_timed_event;
- bcsp->tbcsp.data = (u_long) hu;
+ bcsp->tbcsp.data = (u_long)hu;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
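Taken together, the BCSP hunks move out-of-order detection from bcsp_recv() into bcsp_complete_rx_pkt(), so a misordered reliable packet is no longer dropped before its piggybacked ACK field can be processed: bcsp_pkt_cull() still culls acknowledged packets from the retransmission queue, and only the payload is discarded. The three pass_up values used above, sketched with hypothetical names (the driver keeps a plain int):

	enum bcsp_pass_up {
		BCSP_CONSUME_INTERNAL = 0, /* BCSP link packet, handled in-driver */
		BCSP_DELIVER_PAYLOAD  = 1, /* in-sequence: strip 4-byte header,
					    * pass to hci_recv_frame() */
		BCSP_DROP_PAYLOAD     = 2, /* out-of-order/duplicate reliable pkt:
					    * ACK info used, payload freed */
	};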
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index ed0a4201b551..9e271286c5e5 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -128,7 +128,7 @@ static int intel_wait_booting(struct hci_uart *hu)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hu->hdev, "Device boot interrupted");
return -EINTR;
}
@@ -151,7 +151,7 @@ static int intel_wait_lpm_transaction(struct hci_uart *hu)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hu->hdev, "LPM transaction interrupted");
return -EINTR;
}
@@ -813,7 +813,7 @@ static int intel_setup(struct hci_uart *hu)
err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hdev, "Firmware loading interrupted");
err = -EINTR;
goto done;
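The btusb and hci_intel hunks that swap err == 1 for err == -EINTR both track the return convention of wait_on_bit_timeout(): a pending signal is reported as -EINTR, not as a positive 1, so the old checks never fired. The corrected pattern, as a sketch (flag and bit names hypothetical):

	err = wait_on_bit_timeout(&flags, STATE_SOME_BIT, TASK_INTERRUPTIBLE,
				  msecs_to_jiffies(1000));
	if (err == -EINTR)
		return -EINTR;	/* interrupted by a signal, not a timeout */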
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index dda97398c59a..9497c469efd2 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -697,34 +697,36 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
case HCIUARTSETPROTO:
if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) {
err = hci_uart_set_proto(hu, arg);
- if (err) {
+ if (err)
clear_bit(HCI_UART_PROTO_SET, &hu->flags);
- return err;
- }
} else
- return -EBUSY;
+ err = -EBUSY;
break;
case HCIUARTGETPROTO:
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
- return hu->proto->id;
- return -EUNATCH;
+ err = hu->proto->id;
+ else
+ err = -EUNATCH;
+ break;
case HCIUARTGETDEVICE:
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
- return hu->hdev->id;
- return -EUNATCH;
+ err = hu->hdev->id;
+ else
+ err = -EUNATCH;
+ break;
case HCIUARTSETFLAGS:
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
- return -EBUSY;
- err = hci_uart_set_flags(hu, arg);
- if (err)
- return err;
+ err = -EBUSY;
+ else
+ err = hci_uart_set_flags(hu, arg);
break;
case HCIUARTGETFLAGS:
- return hu->hdev_flags;
+ err = hu->hdev_flags;
+ break;
default:
err = n_tty_ioctl_helper(tty, file, cmd, arg);
@@ -810,6 +812,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_AG6XX
ag6xx_init();
#endif
+#ifdef CONFIG_BT_HCIUART_MRVL
+ mrvl_init();
+#endif
return 0;
}
@@ -845,6 +850,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_AG6XX
ag6xx_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_MRVL
+ mrvl_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
new file mode 100644
index 000000000000..bbc4b39b1dbf
--- /dev/null
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -0,0 +1,387 @@
+/*
+ *
+ * Bluetooth HCI UART driver for Marvell devices
+ *
+ * Copyright (C) 2016 Marvell International Ltd.
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+#define HCI_FW_REQ_PKT 0xA5
+#define HCI_CHIP_VER_PKT 0xAA
+
+#define MRVL_ACK 0x5A
+#define MRVL_NAK 0xBF
+#define MRVL_RAW_DATA 0x1F
+
+enum {
+ STATE_CHIP_VER_PENDING,
+ STATE_FW_REQ_PENDING,
+};
+
+struct mrvl_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+ struct sk_buff_head rawq;
+ unsigned long flags;
+ unsigned int tx_len;
+ u8 id, rev;
+};
+
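+/* Vendor packets carry a 16-bit value (lhs) together with its bitwise
+ * complement (rhs); receivers validate integrity by checking that
+ * lhs ^ rhs == 0xffff.
+ */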
+struct hci_mrvl_pkt {
+ __le16 lhs;
+ __le16 rhs;
+} __packed;
+#define HCI_MRVL_PKT_SIZE 4
+
+static int mrvl_open(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl;
+
+ BT_DBG("hu %p", hu);
+
+ mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
+ if (!mrvl)
+ return -ENOMEM;
+
+ skb_queue_head_init(&mrvl->txq);
+ skb_queue_head_init(&mrvl->rawq);
+
+ set_bit(STATE_CHIP_VER_PENDING, &mrvl->flags);
+
+ hu->priv = mrvl;
+ return 0;
+}
+
+static int mrvl_close(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&mrvl->txq);
+ skb_queue_purge(&mrvl->rawq);
+ kfree_skb(mrvl->rx_skb);
+ kfree(mrvl);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int mrvl_flush(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&mrvl->txq);
+ skb_queue_purge(&mrvl->rawq);
+
+ return 0;
+}
+
+static struct sk_buff *mrvl_dequeue(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&mrvl->txq);
+ if (!skb) {
+		/* No HCI packet queued; fall back to raw firmware data,
+		 * which is sent as-is without a frame type byte.
+		 */
+ skb = skb_dequeue(&mrvl->rawq);
+ } else {
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ }
+
+ return skb;
+}
+
+static int mrvl_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ skb_queue_tail(&mrvl->txq, skb);
+ return 0;
+}
+
+static void mrvl_send_ack(struct hci_uart *hu, unsigned char type)
+{
+ struct mrvl_data *mrvl = hu->priv;
+ struct sk_buff *skb;
+
+ /* No H4 payload, only 1 byte header */
+ skb = bt_skb_alloc(0, GFP_ATOMIC);
+ if (!skb) {
+ bt_dev_err(hu->hdev, "Unable to alloc ack/nak packet");
+ return;
+ }
+ hci_skb_pkt_type(skb) = type;
+
+ skb_queue_tail(&mrvl->txq, skb);
+ hci_uart_tx_wakeup(hu);
+}
+
+static int mrvl_recv_fw_req(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_mrvl_pkt *pkt = (void *)skb->data;
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ int ret = 0;
+
+ if ((pkt->lhs ^ pkt->rhs) != 0xffff) {
+ bt_dev_err(hdev, "Corrupted mrvl header");
+ mrvl_send_ack(hu, MRVL_NAK);
+ ret = -EINVAL;
+ goto done;
+ }
+ mrvl_send_ack(hu, MRVL_ACK);
+
+ if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags)) {
+ bt_dev_err(hdev, "Received unexpected firmware request");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mrvl->tx_len = le16_to_cpu(pkt->lhs);
+
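+	/* Wake mrvl_load_firmware(), which waits on this bit before sending
+	 * the next fragment of tx_len bytes.
+	 */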
+ clear_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&mrvl->flags, STATE_FW_REQ_PENDING);
+
+done:
+ kfree_skb(skb);
+ return ret;
+}
+
+static int mrvl_recv_chip_ver(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_mrvl_pkt *pkt = (void *)skb->data;
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ u16 version = le16_to_cpu(pkt->lhs);
+ int ret = 0;
+
+ if ((pkt->lhs ^ pkt->rhs) != 0xffff) {
+ bt_dev_err(hdev, "Corrupted mrvl header");
+ mrvl_send_ack(hu, MRVL_NAK);
+ ret = -EINVAL;
+ goto done;
+ }
+ mrvl_send_ack(hu, MRVL_ACK);
+
+ if (!test_bit(STATE_CHIP_VER_PENDING, &mrvl->flags)) {
+ bt_dev_err(hdev, "Received unexpected chip version");
+ goto done;
+ }
+
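+	/* Chip id is in the low byte, revision in the high byte */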
+ mrvl->id = version;
+ mrvl->rev = version >> 8;
+
+ bt_dev_info(hdev, "Controller id = %x, rev = %x", mrvl->id, mrvl->rev);
+
+ clear_bit(STATE_CHIP_VER_PENDING, &mrvl->flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&mrvl->flags, STATE_CHIP_VER_PENDING);
+
+done:
+ kfree_skb(skb);
+ return ret;
+}
+
+#define HCI_RECV_CHIP_VER \
+ .type = HCI_CHIP_VER_PKT, \
+ .hlen = HCI_MRVL_PKT_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MRVL_PKT_SIZE
+
+#define HCI_RECV_FW_REQ \
+ .type = HCI_FW_REQ_PKT, \
+ .hlen = HCI_MRVL_PKT_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MRVL_PKT_SIZE
+
+static const struct h4_recv_pkt mrvl_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { HCI_RECV_FW_REQ, .recv = mrvl_recv_fw_req },
+ { HCI_RECV_CHIP_VER, .recv = mrvl_recv_chip_ver },
+};
+
+static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
+ if (IS_ERR(mrvl->rx_skb)) {
+		int err = PTR_ERR(mrvl->rx_skb);
+
+		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+ mrvl->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static int mrvl_load_firmware(struct hci_dev *hdev, const char *name)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ const struct firmware *fw = NULL;
+ const u8 *fw_ptr, *fw_max;
+ int err;
+
+ err = request_firmware(&fw, name, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file %s", name);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_max = fw->data + fw->size;
+
+ bt_dev_info(hdev, "Loading %s", name);
+
+ set_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+
+ while (fw_ptr <= fw_max) {
+ struct sk_buff *skb;
+
+ /* Controller drives the firmware load by sending firmware
+ * request packets containing the expected fragment size.
+ */
+ err = wait_on_bit_timeout(&mrvl->flags, STATE_FW_REQ_PENDING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(2000));
+		if (err == -EINTR) {
+			bt_dev_err(hdev, "Firmware load interrupted");
+ break;
+ } else if (err) {
+ bt_dev_err(hdev, "Firmware request timeout");
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ bt_dev_dbg(hdev, "Firmware request, expecting %d bytes",
+ mrvl->tx_len);
+
+ if (fw_ptr == fw_max) {
+ /* Controller requests a null size once firmware is
+ * fully loaded. If controller expects more data, there
+ * is an issue.
+ */
+ if (!mrvl->tx_len) {
+ bt_dev_info(hdev, "Firmware loading complete");
+ } else {
+ bt_dev_err(hdev, "Firmware loading failure");
+ err = -EINVAL;
+ }
+ break;
+ }
+
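+		/* Clamp the final fragment so we never read past the image */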
+ if (fw_ptr + mrvl->tx_len > fw_max) {
+ mrvl->tx_len = fw_max - fw_ptr;
+ bt_dev_dbg(hdev, "Adjusting tx_len to %d",
+ mrvl->tx_len);
+ }
+
+ skb = bt_skb_alloc(mrvl->tx_len, GFP_KERNEL);
+ if (!skb) {
+ bt_dev_err(hdev, "Failed to alloc mem for FW packet");
+ err = -ENOMEM;
+ break;
+ }
+		hci_skb_pkt_type(skb) = MRVL_RAW_DATA;
+
+ memcpy(skb_put(skb, mrvl->tx_len), fw_ptr, mrvl->tx_len);
+ fw_ptr += mrvl->tx_len;
+
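+		/* Re-arm the request flag before queueing the fragment, so the
+		 * controller's next firmware request cannot be missed.
+		 */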
+ set_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+
+ skb_queue_tail(&mrvl->rawq, skb);
+ hci_uart_tx_wakeup(hu);
+ }
+
+ release_firmware(fw);
+ return err;
+}
+
+static int mrvl_setup(struct hci_uart *hu)
+{
+ int err;
+
+ hci_uart_set_flow_control(hu, true);
+
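+	/* Two-stage boot: the helper image is loaded at the 115200 init
+	 * speed, then the link is switched to 3000000 baud (flow control
+	 * off) for the main firmware image.
+	 */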
+ err = mrvl_load_firmware(hu->hdev, "mrvl/helper_uart_3000000.bin");
+ if (err) {
+ bt_dev_err(hu->hdev, "Unable to download firmware helper");
+ return -EINVAL;
+ }
+
+ hci_uart_set_baudrate(hu, 3000000);
+ hci_uart_set_flow_control(hu, false);
+
+ err = mrvl_load_firmware(hu->hdev, "mrvl/uart8897_bt.bin");
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct hci_uart_proto mrvl_proto = {
+ .id = HCI_UART_MRVL,
+ .name = "Marvell",
+ .init_speed = 115200,
+ .open = mrvl_open,
+ .close = mrvl_close,
+ .flush = mrvl_flush,
+ .setup = mrvl_setup,
+ .recv = mrvl_recv,
+ .enqueue = mrvl_enqueue,
+ .dequeue = mrvl_dequeue,
+};
+
+int __init mrvl_init(void)
+{
+ return hci_uart_register_proto(&mrvl_proto);
+}
+
+int __exit mrvl_deinit(void)
+{
+ return hci_uart_unregister_proto(&mrvl_proto);
+}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 683c2b642057..6c867fbc56a7 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -397,7 +397,7 @@ static int qca_open(struct hci_uart *hu)
skb_queue_head_init(&qca->txq);
skb_queue_head_init(&qca->tx_wait_q);
spin_lock_init(&qca->hci_ibs_lock);
- qca->workqueue = create_singlethread_workqueue("qca_wq");
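+	/* An ordered workqueue preserves the one-work-at-a-time semantics
+	 * of the old single-threaded workqueue.
+	 */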
+ qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
if (!qca->workqueue) {
BT_ERR("QCA Workqueue not initialized properly");
kfree(qca);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 839bad1d8152..070139513e65 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 10
+#define HCI_UART_MAX_PROTO 12
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -47,6 +47,8 @@
#define HCI_UART_BCM 7
#define HCI_UART_QCA 8
#define HCI_UART_AG6XX 9
+#define HCI_UART_NOKIA 10
+#define HCI_UART_MRVL 11
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -189,3 +191,8 @@ int qca_deinit(void);
int ag6xx_init(void);
int ag6xx_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_MRVL
+int mrvl_init(void);
+int mrvl_deinit(void);
+#endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 9af359544110..267f99523fbe 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -783,14 +783,14 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
[RST_BUS_I2S1] = { 0x2d0, BIT(13) },
[RST_BUS_I2S2] = { 0x2d0, BIT(14) },
- [RST_BUS_I2C0] = { 0x2d4, BIT(0) },
- [RST_BUS_I2C1] = { 0x2d4, BIT(1) },
- [RST_BUS_I2C2] = { 0x2d4, BIT(2) },
- [RST_BUS_UART0] = { 0x2d4, BIT(16) },
- [RST_BUS_UART1] = { 0x2d4, BIT(17) },
- [RST_BUS_UART2] = { 0x2d4, BIT(18) },
- [RST_BUS_UART3] = { 0x2d4, BIT(19) },
- [RST_BUS_SCR] = { 0x2d4, BIT(20) },
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_SCR] = { 0x2d8, BIT(20) },
};
static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 4470ffc8cf0d..d6fafb397489 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -14,9 +14,9 @@
#include "ccu_gate.h"
#include "ccu_nk.h"
-void ccu_nk_find_best(unsigned long parent, unsigned long rate,
- unsigned int max_n, unsigned int max_k,
- unsigned int *n, unsigned int *k)
+static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
+ unsigned int max_n, unsigned int max_k,
+ unsigned int *n, unsigned int *k)
{
unsigned long best_rate = 0;
unsigned int best_k = 0, best_n = 0;
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index 0ee1f363e4be..d8eab90ae661 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
SUN4I_PLL2_PRE_DIV_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&sun4i_a10_pll2_lock);
- if (!prediv_clk) {
+ if (IS_ERR(prediv_clk)) {
pr_err("Couldn't register the prediv clock\n");
goto err_free_array;
}
@@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
&mult->hw, &clk_multiplier_ops,
&gate->hw, &clk_gate_ops,
CLK_SET_RATE_PARENT);
- if (!base_clk) {
+ if (IS_ERR(base_clk)) {
pr_err("Couldn't register the base multiplier clock\n");
goto err_free_multiplier;
}
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 411d3033a96e..b200ebf159ee 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node)
return;
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (!reg) {
+ if (IS_ERR(reg)) {
pr_err("Could not get registers for sun8i-mbus-clk\n");
goto err_free_parents;
}
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 2f6156b672ce..fb5f9bbfa09c 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -39,12 +39,10 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};
-static struct cxgb4_pci_uld_info chcr_uld_info = {
+static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
- .nrxq = 4,
+ .nrxq = MAX_ULD_QSETS,
.rxq_size = 1024,
- .nciq = 0,
- .ciq_size = 0,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
@@ -205,7 +203,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
- if (cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &chcr_uld_info)) {
+ if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
pr_err("ULD register fail: No chcr crypto support in cxgb4");
return -1;
}
@@ -228,7 +226,7 @@ static void __exit chcr_crypto_exit(void)
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
- cxgb4_unregister_pci_uld(CXGB4_PCI_ULD1);
+ cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}
module_init(chcr_crypto_init);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5a2631af7410..7dd2e2d37231 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -657,9 +657,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
}
if (subnode) {
- node = of_get_flat_dt_subnode_by_name(node, subnode);
- if (node < 0)
+ int err = of_get_flat_dt_subnode_by_name(node, subnode);
+
+ if (err < 0)
return 0;
+
+ node = err;
}
return __find_uefi_params(node, info, dt_params[i].params);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 3bd127f95315..aded10662020 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
+#define EFI_MMAP_NR_SLACK_SLOTS 8
+
struct file_info {
efi_file_handle_t *handle;
u64 size;
@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
}
}
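+/*
+ * Check whether the buffer still has at least EFI_MMAP_NR_SLACK_SLOTS
+ * spare descriptor slots beyond the current memory map.
+ */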
+static inline bool mmap_has_headroom(unsigned long buff_size,
+ unsigned long map_size,
+ unsigned long desc_size)
+{
+ unsigned long slack = buff_size - map_size;
+
+ return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
+
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- efi_memory_desc_t **map,
- unsigned long *map_size,
- unsigned long *desc_size,
- u32 *desc_ver,
- unsigned long *key_ptr)
+ struct efi_boot_memmap *map)
{
efi_memory_desc_t *m = NULL;
efi_status_t status;
unsigned long key;
u32 desc_version;
- *map_size = sizeof(*m) * 32;
+ *map->desc_size = sizeof(*m);
+ *map->map_size = *map->desc_size * 32;
+ *map->buff_size = *map->map_size;
again:
- /*
- * Add an additional efi_memory_desc_t because we're doing an
- * allocation which may be in a new descriptor region.
- */
- *map_size += sizeof(*m);
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- *map_size, (void **)&m);
+ *map->map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
- *desc_size = 0;
+ *map->desc_size = 0;
key = 0;
- status = efi_call_early(get_memory_map, map_size, m,
- &key, desc_size, &desc_version);
- if (status == EFI_BUFFER_TOO_SMALL) {
+ status = efi_call_early(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
+ if (status == EFI_BUFFER_TOO_SMALL ||
+ !mmap_has_headroom(*map->buff_size, *map->map_size,
+ *map->desc_size)) {
efi_call_early(free_pool, m);
+ /*
+		 * Make sure there are some entries of headroom so that the
+		 * buffer can be reused for a new map after allocations are
+		 * no longer permitted. It's unlikely that the map will grow to
+		 * exceed this headroom once we are ready to trigger
+		 * ExitBootServices().
+ */
+ *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ *map->buff_size = *map->map_size;
goto again;
}
if (status != EFI_SUCCESS)
efi_call_early(free_pool, m);
- if (key_ptr && status == EFI_SUCCESS)
- *key_ptr = key;
- if (desc_ver && status == EFI_SUCCESS)
- *desc_ver = desc_version;
+ if (map->key_ptr && status == EFI_SUCCESS)
+ *map->key_ptr = key;
+ if (map->desc_ver && status == EFI_SUCCESS)
+ *map->desc_ver = desc_version;
fail:
- *map = m;
+ *map->map = m;
return status;
}
@@ -113,13 +128,20 @@ fail:
unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
{
efi_status_t status;
- unsigned long map_size;
+ unsigned long map_size, buff_size;
unsigned long membase = EFI_ERROR;
struct efi_memory_map map;
efi_memory_desc_t *md;
+ struct efi_boot_memmap boot_map;
- status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
- &map_size, &map.desc_size, NULL, NULL);
+ boot_map.map = (efi_memory_desc_t **)&map.map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &map.desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
return membase;
@@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
efi_status_t status;
unsigned long nr_pages;
u64 max_addr = 0;
int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
- NULL, NULL);
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
efi_status_t status;
unsigned long nr_pages;
int i;
+ struct efi_boot_memmap boot_map;
- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
- NULL, NULL);
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
*cmd_line_len = options_bytes;
return (char *)cmdline_addr;
}
+
+/*
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec. Obtains the current memory map, and returns that info after calling
+ * ExitBootServices. The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices. A
+ * client-specific structure may be passed to the function via priv. The
+ * client function may be called multiple times.
+ */
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
+ void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func)
+{
+ efi_status_t status;
+
+ status = efi_get_memory_map(sys_table_arg, map);
+
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = priv_func(sys_table_arg, map, priv);
+ if (status != EFI_SUCCESS)
+ goto free_map;
+
+ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+
+ if (status == EFI_INVALID_PARAMETER) {
+ /*
+ * The memory map changed between efi_get_memory_map() and
+ * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
+ * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+ * updated map, and try again. The spec implies one retry
+		 * should be sufficient, which is confirmed against the EDK2
+ * implementation. Per the spec, we can only invoke
+ * get_memory_map() and exit_boot_services() - we cannot alloc
+ * so efi_get_memory_map() cannot be used, and we must reuse
+ * the buffer. For all practical purposes, the headroom in the
+ * buffer should account for any changes in the map so the call
+ * to get_memory_map() is expected to succeed here.
+ */
+ *map->map_size = *map->buff_size;
+ status = efi_call_early(get_memory_map,
+ map->map_size,
+ *map->map,
+ map->key_ptr,
+ map->desc_size,
+ map->desc_ver);
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = priv_func(sys_table_arg, map, priv);
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ }
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ return EFI_SUCCESS;
+
+free_map:
+ efi_call_early(free_pool, *map->map);
+fail:
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index e58abfa953cc..a6a93116a8f0 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -152,6 +152,27 @@ fdt_set_fail:
#define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
+struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv)
+{
+ struct exit_boot_struct *p = priv;
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight to SetVirtualAddressMap()
+ */
+ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+ p->runtime_map, p->runtime_entry_count);
+
+ return EFI_SUCCESS;
+}
+
/*
* Allocate memory for a new FDT, then add EFI, commandline, and
* initrd related fields to the FDT. This routine increases the
@@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr,
unsigned long fdt_size)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
u32 desc_ver;
unsigned long mmap_key;
efi_memory_desc_t *memory_map, *runtime_map;
unsigned long new_fdt_size;
efi_status_t status;
int runtime_entry_count = 0;
+ struct efi_boot_memmap map;
+ struct exit_boot_struct priv;
+
+ map.map = &runtime_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_ver;
+ map.key_ptr = &mmap_key;
+ map.buff_size = &buff_size;
/*
* Get a copy of the current memory map that we will use to prepare
@@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions.
*/
- status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
- &desc_size, &desc_ver, &mmap_key);
+ status = efi_get_memory_map(sys_table, &map);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
return status;
@@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
pr_efi(sys_table,
"Exiting boot services and installing virtual address map...\n");
+ map.map = &memory_map;
/*
* Estimate size of new FDT, and allocate memory for it. We
* will allocate a bigger buffer if this ends up being too
@@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* we can get the memory map key needed for
* exit_boot_services().
*/
- status = efi_get_memory_map(sys_table, &memory_map, &map_size,
- &desc_size, &desc_ver, &mmap_key);
+ status = efi_get_memory_map(sys_table, &map);
if (status != EFI_SUCCESS)
goto fail_free_new_fdt;
@@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
}
}
- /*
- * Update the memory map with virtual addresses. The function will also
- * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
- * entries so that we can pass it straight into SetVirtualAddressMap()
- */
- efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
- &runtime_entry_count);
-
- /* Now we are ready to exit_boot_services.*/
- status = sys_table->boottime->exit_boot_services(handle, mmap_key);
+ sys_table->boottime->free_pool(memory_map);
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+ exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 53f6d3fe6d86..0c9f58c5ba50 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
unsigned long random_seed)
{
unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ unsigned long buff_size;
efi_status_t status;
efi_memory_desc_t *memory_map;
int map_offset;
+ struct efi_boot_memmap map;
- status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
- &desc_size, NULL, NULL);
+ map.map = &memory_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = NULL;
+ map.key_ptr = NULL;
+ map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table_arg, &map);
if (status != EFI_SUCCESS)
return status;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index df7ab2458e50..39c01b942ee4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1708,11 +1708,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
DRM_INFO("amdgpu: finishing device.\n");
adev->shutdown = true;
+ drm_crtc_force_disable_all(adev->ddev);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
- drm_crtc_force_disable_all(adev->ddev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
kfree(adev->ip_block_status);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index a978381ef95b..9b17a66cf0e1 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
-void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
{
struct atmel_hlcdc_crtc_state *state;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 016c191221f3..52c527f6642a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
u32 *coeff_tab = heo_upscaling_ycoef;
u32 max_memsize;
- if (state->crtc_w < state->src_w)
+ if (state->crtc_h < state->src_h)
coeff_tab = heo_downscaling_ycoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
33 + i,
0xffffffff,
coeff_tab[i]);
- factor = ((8 * 256 * state->src_w) - (256 * 4)) /
- state->crtc_w;
+ factor = ((8 * 256 * state->src_h) - (256 * 4)) /
+ state->crtc_h;
factor++;
- max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
+ max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
2048;
- if (max_memsize > state->src_w)
+ if (max_memsize > state->src_h)
factor--;
factor_reg |= (factor << 16) | 0x80000000;
}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 57676f8d7ecf..a6289752be16 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -1015,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
typedef struct drm_mode_fb_cmd232 {
u32 fb_id;
u32 width;
@@ -1071,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
return 0;
}
+#endif
static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
@@ -1104,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
[DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+#endif
};
/**
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index e0166403b4bd..40ce841eb952 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -55,11 +55,11 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
flags = exynos_gem->flags;
/*
- * without iommu support, not support physically non-continuous memory
- * for framebuffer.
+ * Physically non-contiguous memory type for framebuffer is not
+ * supported without IOMMU.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
+ DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 0525c56145db..147ef0d298cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1753,32 +1753,6 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int fimc_suspend(struct device *dev)
-{
- struct fimc_context *ctx = get_fimc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return fimc_clk_ctrl(ctx, false);
-}
-
-static int fimc_resume(struct device *dev)
-{
- struct fimc_context *ctx = get_fimc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (!pm_runtime_suspended(dev))
- return fimc_clk_ctrl(ctx, true);
-
- return 0;
-}
-#endif
-
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1799,7 +1773,8 @@ static int fimc_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops fimc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 4bf00f57ffe8..6eca8bb88648 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1475,8 +1475,8 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int g2d_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int g2d_runtime_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1490,25 +1490,6 @@ static int g2d_suspend(struct device *dev)
flush_work(&g2d->runqueue_work);
- return 0;
-}
-
-static int g2d_resume(struct device *dev)
-{
- struct g2d_data *g2d = dev_get_drvdata(dev);
-
- g2d->suspended = false;
- g2d_exec_runqueue(g2d);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int g2d_runtime_suspend(struct device *dev)
-{
- struct g2d_data *g2d = dev_get_drvdata(dev);
-
clk_disable_unprepare(g2d->gate_clk);
return 0;
@@ -1523,12 +1504,16 @@ static int g2d_runtime_resume(struct device *dev)
if (ret < 0)
dev_warn(dev, "failed to enable clock.\n");
+ g2d->suspended = false;
+ g2d_exec_runqueue(g2d);
+
return ret;
}
#endif
static const struct dev_pm_ops g2d_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 5d20da8f957e..52a9d269484e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1760,34 +1760,7 @@ static int gsc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gsc_suspend(struct device *dev)
-{
- struct gsc_context *ctx = get_gsc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return gsc_clk_ctrl(ctx, false);
-}
-
-static int gsc_resume(struct device *dev)
-{
- struct gsc_context *ctx = get_gsc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (!pm_runtime_suspended(dev))
- return gsc_clk_ctrl(ctx, true);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int gsc_runtime_suspend(struct device *dev)
+static int __maybe_unused gsc_runtime_suspend(struct device *dev)
{
struct gsc_context *ctx = get_gsc_context(dev);
@@ -1796,7 +1769,7 @@ static int gsc_runtime_suspend(struct device *dev)
return gsc_clk_ctrl(ctx, false);
}
-static int gsc_runtime_resume(struct device *dev)
+static int __maybe_unused gsc_runtime_resume(struct device *dev)
{
struct gsc_context *ctx = get_gsc_context(dev);
@@ -1804,10 +1777,10 @@ static int gsc_runtime_resume(struct device *dev)
return gsc_clk_ctrl(ctx, true);
}
-#endif
static const struct dev_pm_ops gsc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 404367a430b5..6591e406084c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -794,29 +794,6 @@ static int rotator_clk_crtl(struct rot_context *rot, bool enable)
return 0;
}
-
-#ifdef CONFIG_PM_SLEEP
-static int rotator_suspend(struct device *dev)
-{
- struct rot_context *rot = dev_get_drvdata(dev);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return rotator_clk_crtl(rot, false);
-}
-
-static int rotator_resume(struct device *dev)
-{
- struct rot_context *rot = dev_get_drvdata(dev);
-
- if (!pm_runtime_suspended(dev))
- return rotator_clk_crtl(rot, true);
-
- return 0;
-}
-#endif
-
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
@@ -833,7 +810,8 @@ static int rotator_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops rotator_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
NULL)
};
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 95ddd56b89f0..5de36d8dcc68 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1281,6 +1281,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv);
+ /* Everything is in place, we can now relax! */
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver.name, driver.major, driver.minor, driver.patchlevel,
+ driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
intel_runtime_pm_put(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7a30af79d799..f38ceffd82c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -122,8 +122,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
has_full_48bit_ppgtt =
IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
- if (intel_vgpu_active(dev_priv))
- has_full_ppgtt = false; /* emulation is too hard */
+ if (intel_vgpu_active(dev_priv)) {
+ /* emulation is too hard */
+ has_full_ppgtt = false;
+ has_full_48bit_ppgtt = false;
+ }
if (!has_aliasing_ppgtt)
return 0;
@@ -158,7 +161,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
+ if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index f6acb5a0e701..b81cfb3b22ec 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- if (!IS_HASWELL(dev_priv))
- return;
-
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 47bdf9dad0d3..b9e5a63a7c9e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -554,7 +554,6 @@ void intel_dvo_init(struct drm_device *dev)
return;
}
- drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dvo);
kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index adca262d591a..7acbbbf97833 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -1047,6 +1047,23 @@ err_out:
return err;
}
+static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+ {
+ .callback = intel_use_opregion_panel_type_callback,
+ .ident = "Conrac GmbH IX45GM2",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+ },
+ },
+ { }
+};
+
int
intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
{
@@ -1073,6 +1090,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
}
/*
+	 * So far we know that some machines must use it, others must not use it.
+ * There doesn't seem to be any way to determine which way to go, except
+ * via a quirk list :(
+ */
+ if (!dmi_check_system(intel_use_opregion_panel_type)) {
+ DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ return -ENODEV;
+ }
+
+ /*
* FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
* low vswing for eDP, whereas the VBT panel type (2) gives us normal
* vswing instead. Low vswing results in some display flickers, so
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 53e13c10e4ea..2d2481392824 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7859,6 +7859,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
case GEN6_PCODE_ILLEGAL_CMD:
return -ENXIO;
case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
return -EOVERFLOW;
case GEN6_PCODE_TIMEOUT:
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2b0d1baf15b3..cf171b4b8c67 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -255,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t max_sleep_time = 0x1f;
- /* Lately it was identified that depending on panel idle frame count
- * calculated at HW can be off by 1. So let's use what came
- * from VBT + 1.
- * There are also other cases where panel demands at least 4
- * but VBT is not being set. To cover these 2 cases lets use
- * at least 5 when VBT isn't set to be on the safest side.
+ /*
+ * Let's respect VBT in case VBT asks a higher idle_frame value.
+ * Let's use 6 as the minimum to cover all known cases including
+ * the off-by-one issue that HW has in some cases. Also there are
+	 * cases where the sink should be able to train
+	 * with 5 or 6 idle patterns.
*/
- uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
+ uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
uint32_t val = EDP_PSR_ENABLE;
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 7ea8aa7ca408..6bc712f32c8b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -175,6 +175,7 @@ struct nvkm_device_func {
void (*fini)(struct nvkm_device *, bool suspend);
resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+ bool cpu_coherent;
};
struct nvkm_device_quirk {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6190035edfea..864323b19cf7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -209,7 +209,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
- nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+ if (!nvxx_device(&drm->device)->func->cpu_coherent)
+ nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
nvbo->page_shift = 12;
if (drm->client.vm) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index b1b693219db3..62ad0300cfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,6 +1614,7 @@ nvkm_device_pci_func = {
.fini = nvkm_device_pci_fini,
.resource_addr = nvkm_device_pci_resource_addr,
.resource_size = nvkm_device_pci_resource_size,
+ .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 939682f18788..9b638bd905ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -245,6 +245,7 @@ nvkm_device_tegra_func = {
.fini = nvkm_device_tegra_fini,
.resource_addr = nvkm_device_tegra_resource_addr,
.resource_size = nvkm_device_tegra_resource_size,
+ .cpu_coherent = false,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index edec30fd3ecd..0a7b6ed5ed28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -37,7 +37,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+ mutex_lock(&chan->fifo->base.engine.subdev.mutex);
nvkm_ramht_remove(imem->ramht, cookie);
+ mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
}
static int
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e6abc09b67e3..1f78ec2548ec 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3015,6 +3015,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (rdev->pdev->device == 0x6811 &&
rdev->pdev->revision == 0x81)
max_mclk = 120000;
+ /* limit sclk/mclk on Jet parts for stability */
+ if (rdev->pdev->device == 0x6665 &&
+ rdev->pdev->revision == 0xc3) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 9688bfa92ccd..611b6b9bb3cb 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -122,7 +122,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
return 0;
cmd = urb->transfer_buffer;
- for (i = y; i < height ; i++) {
+ for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 59adcf8532dd..3f6704cf6608 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -144,7 +144,7 @@ static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
return &vc4->bo_cache.size_list[page_index];
}
-void vc4_bo_cache_purge(struct drm_device *dev)
+static void vc4_bo_cache_purge(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 46527e989ce3..2543cf5b8b51 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -309,8 +309,14 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
* of uniforms on each side. However, this scheme is easy to
* validate so it's all we allow for now.
*/
-
- if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+ switch (QPU_GET_FIELD(inst, QPU_SIG)) {
+ case QPU_SIG_NONE:
+ case QPU_SIG_SCOREBOARD_UNLOCK:
+ case QPU_SIG_COLOR_LOAD:
+ case QPU_SIG_LOAD_TMU0:
+ case QPU_SIG_LOAD_TMU1:
+ break;
+ default:
DRM_ERROR("uniforms address change must be "
"normal math\n");
return false;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 137125b5eae7..5ce71ce7b6c4 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, adap_info);
- if (ret) {
- pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_request_irq;
- }
-
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.of_node = pdev->dev.of_node;
pch_adap->dev.parent = &pdev->dev;
+ }
+
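+	/* Request the shared IRQ only after every channel adapter above has
+	 * been initialized; the handler may run as soon as it is registered.
+	 */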
+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+ KBUILD_MODNAME, adap_info);
+ if (ret) {
+ pch_pci_err(pdev, "request_irq FAILED\n");
+ goto err_request_irq;
+ }
+
+ for (i = 0; i < adap_info->ch_num; i++) {
+ pch_adap = &adap_info->pch_data[i].pch_adapter;
pch_i2c_init(&adap_info->pch_data[i]);
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 501bd15cb78e..a8497cfdae6f 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
#ifdef CONFIG_PM_SLEEP
static int qup_i2c_suspend(struct device *device)
{
- qup_i2c_pm_suspend_runtime(device);
+ if (!pm_runtime_suspended(device))
+ return qup_i2c_pm_suspend_runtime(device);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 528e755c468f..3278ebf1cc5c 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
	/* Only select the channel if it's different from the last channel */
if (data->last_chan != regval) {
ret = pca954x_reg_write(muxc->parent, client, regval);
- data->last_chan = regval;
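+		/* On a failed write, invalidate the cached channel so the
+		 * next selection retries the register write.
+		 */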
+ data->last_chan = ret ? 0 : regval;
}
return ret;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 3cbbfbe7da7c..71c8867ef66b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -332,6 +332,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)
spin_lock_irqsave(&ep->com.dev->lock, flags);
_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+ if (idr_is_empty(&ep->com.dev->hwtid_idr))
+ wake_up(&ep->com.dev->wait);
spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}
@@ -2014,8 +2016,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
}
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, rt_tos2priority(tos));
- if (!ep->l2t)
+ if (!ep->l2t) {
+ dev_put(pdev);
goto out;
+ }
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 071d7332ec06..93e3d270a98a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
static void c4iw_dealloc(struct uld_ctx *ctx)
{
c4iw_rdev_close(&ctx->dev->rdev);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
idr_destroy(&ctx->dev->cqidr);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
idr_destroy(&ctx->dev->qpidr);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
idr_destroy(&ctx->dev->mmidr);
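+	/* remove_ep_tid() wakes this queue once the last hwtid is released */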
+ wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
idr_destroy(&ctx->dev->hwtid_idr);
idr_destroy(&ctx->dev->stid_idr);
idr_destroy(&ctx->dev->atid_idr);
@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
INIT_LIST_HEAD(&devp->db_fc_list);
+ init_waitqueue_head(&devp->wait);
devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
if (c4iw_debugfs_root) {
@@ -1475,6 +1480,10 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
static struct cxgb4_uld_info c4iw_uld_info = {
.name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 511,
+ .ciq = true,
+ .lro = false,
.add = c4iw_uld_add,
.rx_handler = c4iw_uld_rx_handler,
.state_change = c4iw_uld_state_change,
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 6a9bef1f09a8..cdcf3eeb6f4a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -263,6 +263,7 @@ struct c4iw_dev {
struct idr stid_idr;
struct list_head db_fc_list;
u32 avail_ird;
+ wait_queue_head_t wait;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 9c2e53d28f98..0f21c3a25552 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
/* Generate GUID changed event */
if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+ if (mlx4_is_master(dev->dev)) {
+ union ib_gid gid;
+ int err = 0;
+
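+			/* If the event carries no new prefix, read the
+			 * current one from GID index 0 instead.
+			 */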
+ if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+ err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+ else
+ gid.global.subnet_prefix =
+ eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+ if (err) {
+ pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+ port, err);
+ } else {
+ pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+ port,
+ (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+ be64_to_cpu(gid.global.subnet_prefix));
+ atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+ be64_to_cpu(gid.global.subnet_prefix));
+ }
+ }
mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		/* if master, notify all slaves */
if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
if (err)
goto demux_err;
dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+ atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+ be64_to_cpu(gid.global.subnet_prefix));
err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
&dev->sriov.sqps[i]);
if (err)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 2af44c2de262..87ba9bca4181 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
bool per_port = !!(ibdev->dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
+ if (mlx4_is_slave(ibdev->dev))
+ return 0;
+
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
/* i == 1 means we are building port counters */
if (i && !per_port)
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 8f7ad07915b0..097bfcc4ee99 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
if (!group->members[i])
leave_state |= (1 << i);
- return leave_state & (group->rec.scope_join_state & 7);
+ return leave_state & (group->rec.scope_join_state & 0xf);
}
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
} else
mcg_warn_group(group, "DRIVER BUG\n");
} else if (group->state == MCAST_LEAVE_SENT) {
- if (group->rec.scope_join_state & 7)
- group->rec.scope_join_state &= 0xf8;
+ if (group->rec.scope_join_state & 0xf)
+ group->rec.scope_join_state &= 0xf0;
group->state = MCAST_IDLE;
mutex_unlock(&group->lock);
if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
static int handle_join_req(struct mcast_group *group, u8 join_mask,
struct mcast_req *req)
{
- u8 group_join_state = group->rec.scope_join_state & 7;
+ u8 group_join_state = group->rec.scope_join_state & 0xf;
int ref = 0;
u16 status;
struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
u8 cur_join_state;
resp_join_state = ((struct ib_sa_mcmember_data *)
- group->response_sa_mad.data)->scope_join_state & 7;
- cur_join_state = group->rec.scope_join_state & 7;
+ group->response_sa_mad.data)->scope_join_state & 0xf;
+ cur_join_state = group->rec.scope_join_state & 0xf;
if (method == IB_MGMT_METHOD_GET_RESP) {
/* successful join */
@@ -710,7 +710,7 @@ process_requests:
req = list_first_entry(&group->pending_list, struct mcast_req,
group_list);
sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
- req_join_state = sa_data->scope_join_state & 0x7;
+ req_join_state = sa_data->scope_join_state & 0xf;
/* For a leave request, we will immediately answer the VF, and
* update our internal counters. The actual leave will be sent
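The mask widening from 0x7 to 0xf above reflects that scope_join_state packs the scope in the high nibble and a 4-bit join state in the low nibble; masking with 0x7 silently dropped join-state bit 3. A standalone sketch of the difference (the value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t scope_join_state = 0x28;  /* scope 2, join state 0x8 */

            printf("0x7 mask: 0x%x\n", scope_join_state & 0x7); /* 0x0: bit lost */
            printf("0xf mask: 0x%x\n", scope_join_state & 0xf); /* 0x8: bit kept */
            return 0;
    }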
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 7c5832ede4bd..686ab48ff644 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
struct workqueue_struct *wq;
struct workqueue_struct *ud_wq;
spinlock_t ud_lock;
- __be64 subnet_prefix;
+ atomic64_t subnet_prefix;
__be64 guid_cache[128];
struct mlx4_ib_dev *dev;
/* the following lock protects both mcg_table and mcg_mgid0_list */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 768085f59566..7fb9629bd12b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
sqp->ud_header.grh.flow_label =
ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
- if (is_eth)
+ if (is_eth) {
memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
- else {
- if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
- /* When multi-function is enabled, the ib_core gid
- * indexes don't necessarily match the hw ones, so
- * we must use our own cache */
- sqp->ud_header.grh.source_gid.global.subnet_prefix =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- subnet_prefix;
- sqp->ud_header.grh.source_gid.global.interface_id =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- guid_cache[ah->av.ib.gid_index];
- } else
- ib_get_cached_gid(ib_dev,
- be32_to_cpu(ah->av.ib.port_pd) >> 24,
- ah->av.ib.gid_index,
- &sqp->ud_header.grh.source_gid, NULL);
+ } else {
+ if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+ /* When multi-function is enabled, the ib_core gid
+ * indexes don't necessarily match the hw ones, so
+ * we must use our own cache
+ */
+ sqp->ud_header.grh.source_gid.global.subnet_prefix =
+ cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+ demux[sqp->qp.port - 1].
+ subnet_prefix)));
+ sqp->ud_header.grh.source_gid.global.interface_id =
+ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+ guid_cache[ah->av.ib.gid_index];
+ } else {
+ ib_get_cached_gid(ib_dev,
+ be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index,
+ &sqp->ud_header.grh.source_gid, NULL);
+ }
}
memcpy(sqp->ud_header.grh.destination_gid.raw,
ah->av.ib.dgid, 16);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e4aecbf34131..551aa0e789aa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -284,7 +284,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
- return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+ if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+ return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+ return 0;
}
enum {
@@ -1423,6 +1425,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
dmac_47_16),
ib_spec->eth.val.dst_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ smac_47_16),
+ ib_spec->eth.mask.src_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ smac_47_16),
+ ib_spec->eth.val.src_mac);
+
if (ib_spec->eth.mask.vlan_tag) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
vlan_tag, 1);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 80c4b6b401b8..46b64970058e 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -294,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr)
{
rvt_deinit_mregion(&mr->mr);
rvt_free_lkey(&mr->mr);
- vfree(mr);
+ kfree(mr);
}
/**
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 55f0e8f0ca79..ddd59270ff6d 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -362,15 +362,34 @@ static int __init rxe_module_init(void)
return err;
}
- err = rxe_net_init();
+ err = rxe_net_ipv4_init();
if (err) {
- pr_err("rxe: unable to init\n");
+ pr_err("rxe: unable to init ipv4 tunnel\n");
rxe_cache_exit();
- return err;
+ goto exit;
+ }
+
+ err = rxe_net_ipv6_init();
+ if (err) {
+ pr_err("rxe: unable to init ipv6 tunnel\n");
+ rxe_cache_exit();
+ goto exit;
}
+
+ err = register_netdevice_notifier(&rxe_net_notifier);
+ if (err) {
+ pr_err("rxe: Failed to rigister netdev notifier\n");
+ goto exit;
+ }
+
pr_info("rxe: loaded\n");
return 0;
+
+exit:
+ rxe_release_udp_tunnel(recv_sockets.sk4);
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ return err;
}
static void __exit rxe_module_exit(void)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 36f67de44095..1c59ef2c67aa 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -689,7 +689,14 @@ int rxe_completer(void *arg)
qp->req.need_retry = 1;
rxe_run_task(&qp->req.task, 1);
}
+
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ }
+
goto exit;
+
} else {
wqe->status = IB_WC_RETRY_EXC_ERR;
state = COMPST_ERROR;
@@ -716,6 +723,12 @@ int rxe_completer(void *arg)
case COMPST_ERROR:
do_complete(qp, wqe);
rxe_qp_error(qp);
+
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ }
+
goto exit;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 0b8d2ea8b41d..eedf2f1cafdf 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -275,9 +275,10 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
return sock;
}
-static void rxe_release_udp_tunnel(struct socket *sk)
+void rxe_release_udp_tunnel(struct socket *sk)
{
- udp_tunnel_sock_release(sk);
+ if (sk)
+ udp_tunnel_sock_release(sk);
}
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
@@ -658,51 +659,45 @@ out:
return NOTIFY_OK;
}
-static struct notifier_block rxe_net_notifier = {
+struct notifier_block rxe_net_notifier = {
.notifier_call = rxe_notify,
};
-int rxe_net_init(void)
+int rxe_net_ipv4_init(void)
{
- int err;
-
spin_lock_init(&dev_list_lock);
- recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
- htons(ROCE_V2_UDP_DPORT), true);
- if (IS_ERR(recv_sockets.sk6)) {
- recv_sockets.sk6 = NULL;
- pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
- return -1;
- }
-
recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
- htons(ROCE_V2_UDP_DPORT), false);
+ htons(ROCE_V2_UDP_DPORT), false);
if (IS_ERR(recv_sockets.sk4)) {
- rxe_release_udp_tunnel(recv_sockets.sk6);
recv_sockets.sk4 = NULL;
- recv_sockets.sk6 = NULL;
pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
return -1;
}
- err = register_netdevice_notifier(&rxe_net_notifier);
- if (err) {
- rxe_release_udp_tunnel(recv_sockets.sk6);
- rxe_release_udp_tunnel(recv_sockets.sk4);
- pr_err("rxe: Failed to rigister netdev notifier\n");
- }
-
- return err;
+ return 0;
}
-void rxe_net_exit(void)
+int rxe_net_ipv6_init(void)
{
- if (recv_sockets.sk6)
- rxe_release_udp_tunnel(recv_sockets.sk6);
+#if IS_ENABLED(CONFIG_IPV6)
- if (recv_sockets.sk4)
- rxe_release_udp_tunnel(recv_sockets.sk4);
+ spin_lock_init(&dev_list_lock);
+ recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+ htons(ROCE_V2_UDP_DPORT), true);
+ if (IS_ERR(recv_sockets.sk6)) {
+ recv_sockets.sk6 = NULL;
+ pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+void rxe_net_exit(void)
+{
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ rxe_release_udp_tunnel(recv_sockets.sk4);
unregister_netdevice_notifier(&rxe_net_notifier);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 7b06f76d16cc..0daf7f09e5b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -44,10 +44,13 @@ struct rxe_recv_sockets {
};
extern struct rxe_recv_sockets recv_sockets;
+extern struct notifier_block rxe_net_notifier;
+void rxe_release_udp_tunnel(struct socket *sk);
struct rxe_dev *rxe_net_add(struct net_device *ndev);
-int rxe_net_init(void);
+int rxe_net_ipv4_init(void);
+int rxe_net_ipv6_init(void);
void rxe_net_exit(void);
#endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 3d464c23e08b..144d2f129fcd 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -312,7 +312,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
* make a copy of the skb to post to the next qp
*/
skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
- skb_clone(skb, GFP_KERNEL) : NULL;
+ skb_clone(skb, GFP_ATOMIC) : NULL;
pkt->qp = qp;
rxe_add_ref(qp);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 33b2d9d77021..13a848a518e8 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -511,24 +511,21 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
static void update_wqe_state(struct rxe_qp *qp,
- struct rxe_send_wqe *wqe,
- struct rxe_pkt_info *pkt,
- enum wqe_state *prev_state)
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt)
{
- enum wqe_state prev_state_ = wqe->state;
-
if (pkt->mask & RXE_END_MASK) {
if (qp_type(qp) == IB_QPT_RC)
wqe->state = wqe_state_pending;
} else {
wqe->state = wqe_state_processing;
}
-
- *prev_state = prev_state_;
}
-static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- struct rxe_pkt_info *pkt, int payload)
+static void update_wqe_psn(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt,
+ int payload)
{
/* number of packets left to send including current one */
int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
@@ -546,9 +543,34 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
else
qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+}
- qp->req.opcode = pkt->opcode;
+static void save_state(struct rxe_send_wqe *wqe,
+ struct rxe_qp *qp,
+ struct rxe_send_wqe *rollback_wqe,
+ struct rxe_qp *rollback_qp)
+{
+ rollback_wqe->state = wqe->state;
+ rollback_wqe->first_psn = wqe->first_psn;
+ rollback_wqe->last_psn = wqe->last_psn;
+ rollback_qp->req.psn = qp->req.psn;
+}
+static void rollback_state(struct rxe_send_wqe *wqe,
+ struct rxe_qp *qp,
+ struct rxe_send_wqe *rollback_wqe,
+ struct rxe_qp *rollback_qp)
+{
+ wqe->state = rollback_wqe->state;
+ wqe->first_psn = rollback_wqe->first_psn;
+ wqe->last_psn = rollback_wqe->last_psn;
+ qp->req.psn = rollback_qp->req.psn;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt, int payload)
+{
+ qp->req.opcode = pkt->opcode;
if (pkt->mask & RXE_END_MASK)
qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
@@ -571,7 +593,8 @@ int rxe_requester(void *arg)
int mtu;
int opcode;
int ret;
- enum wqe_state prev_state;
+ struct rxe_qp rollback_qp;
+ struct rxe_send_wqe rollback_wqe;
next_wqe:
if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -688,13 +711,21 @@ next_wqe:
goto err;
}
- update_wqe_state(qp, wqe, &pkt, &prev_state);
+ /*
+ * To prevent a race on wqe access between the requester and the
+ * completer, the wqe's state and psn members need to be set before
+ * calling rxe_xmit_packet().
+ * Otherwise, the completer might initiate an unjustified retry flow.
+ */
+ save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+ update_wqe_state(qp, wqe, &pkt);
+ update_wqe_psn(qp, wqe, &pkt, payload);
ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
if (ret) {
qp->need_req_skb = 1;
kfree_skb(skb);
- wqe->state = prev_state;
+ rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
if (ret == -EAGAIN) {
rxe_run_task(&qp->req.task, 1);
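The save_state()/rollback_state() pair introduced above snapshots exactly the fields a concurrent completer may read, publishes the speculative update before the transmit, and restores the snapshot if the transmit fails. A minimal userspace sketch of the same idiom, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct wqe_state {
            int state;
            unsigned first_psn;
            unsigned last_psn;
    };

    static void save_state(const struct wqe_state *w, struct wqe_state *snap)
    {
            *snap = *w;                      /* keep a rollback copy */
    }

    static void rollback_state(struct wqe_state *w, const struct wqe_state *snap)
    {
            *w = *snap;                      /* undo the speculative update */
    }

    static bool xmit(void) { return false; }  /* pretend the send failed */

    int main(void)
    {
            struct wqe_state wqe = { 0, 10, 12 }, snap;

            save_state(&wqe, &snap);
            wqe.state = 1;                   /* speculative update before xmit */
            if (!xmit())
                    rollback_state(&wqe, &snap);
            printf("state=%d\n", wqe.state); /* back to 0 */
            return 0;
    }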
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index ebb03b46e2ad..3e0f0f2baace 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -972,11 +972,13 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
free_rd_atomic_resource(qp, res);
rxe_advance_resp_resource(qp);
+ memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;
- res->first_psn = qp->resp.psn;
- res->last_psn = qp->resp.psn;
- res->cur_psn = qp->resp.psn;
+ res->first_psn = ack_pkt.psn;
+ res->last_psn = ack_pkt.psn;
+ res->cur_psn = ack_pkt.psn;
rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
if (rc) {
@@ -1116,8 +1118,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
rc = RESPST_CLEANUP;
goto out;
}
- bth_set_psn(SKB_TO_PKT(skb_copy),
- qp->resp.psn - 1);
+
/* Resend the result. */
rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
pkt, skb_copy);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dc6d241b9406..be11d5d5b8c1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level == IPOIB_FLUSH_LIGHT) {
+ int oper_up;
ipoib_mark_paths_invalid(dev);
+ /* Set the IPoIB operation state to down to prevent races between
+ * the flush flow, which leaves the MCG, and on-the-fly joins that
+ * can happen during that time. The mcast restart task should deal
+ * with the join requests we missed.
+ */
+ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_mcast_dev_flush(dev);
+ if (oper_up)
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_flush_ah(dev);
}
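The test_and_clear_bit()/set_bit() pair above drops the OPER_UP flag for the duration of the multicast flush and restores it only if it was previously set. A userspace sketch of that pattern, using C11 atomics as a stand-in for the kernel bitops:

    #include <stdatomic.h>
    #include <stdio.h>

    #define OPER_UP (1ul << 0)

    static _Atomic unsigned long flags = OPER_UP;

    int main(void)
    {
            /* atomically clear the bit and remember its previous value */
            int oper_up = (int)(atomic_fetch_and(&flags, ~OPER_UP) & OPER_UP);

            /* ... do the flush work with the device marked down ... */

            if (oper_up)
                    atomic_fetch_or(&flags, OPER_UP);  /* restore */
            printf("oper_up was %d\n", oper_up);
            return 0;
    }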
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 5d11fea3c8ec..f3135ae22df4 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -950,6 +950,12 @@ static const struct input_device_id joydev_ids[] = {
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_ABS) },
+ .absbit = { BIT_MASK(ABS_Z) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .evbit = { BIT_MASK(EV_ABS) },
.absbit = { BIT_MASK(ABS_WHEEL) },
},
{
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index b2744a64e933..f502c8488be8 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client)
data->max_fingers = 5; /* Most devices handle up to 5 fingers */
}
- error = device_property_read_string(dev, "touchscreen-fw-name", &str);
+ error = device_property_read_string(dev, "firmware-name", &str);
if (!error)
- snprintf(data->fw_name, sizeof(data->fw_name), "%s", str);
+ snprintf(data->fw_name, sizeof(data->fw_name),
+ "silead/%s", str);
else
dev_dbg(dev, "Firmware file name read error. Using default.");
}
@@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
if (!acpi_id)
return -ENODEV;
- snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
- acpi_id->id);
+ snprintf(data->fw_name, sizeof(data->fw_name),
+ "silead/%s.fw", acpi_id->id);
for (i = 0; i < strlen(data->fw_name); i++)
data->fw_name[i] = tolower(data->fw_name[i]);
} else {
- snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
- id->name);
+ snprintf(data->fw_name, sizeof(data->fw_name),
+ "silead/%s.fw", id->name);
}
return 0;
@@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
const struct i2c_device_id *id)
{
- snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name);
+ snprintf(data->fw_name, sizeof(data->fw_name),
+ "silead/%s.fw", id->name);
return 0;
}
#endif
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 112e17c2768b..37f952dd9fc9 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
{
struct irq_domain_chip_generic *dgc = d->gc;
struct irq_chip_generic *gc;
+ unsigned long flags;
unsigned smr;
int idx;
int ret;
@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
gc = dgc->gc[idx];
- irq_gc_lock(gc);
+ irq_gc_lock_irqsave(gc, flags);
smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
- irq_gc_unlock(gc);
+ irq_gc_unlock_irqrestore(gc, flags);
return ret;
}
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 4f0d068e1abe..2a624d87a035 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
unsigned int *out_type)
{
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+ unsigned long flags;
unsigned smr;
int ret;
@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
if (ret)
return ret;
- irq_gc_lock(bgc);
+ irq_gc_lock_irqsave(bgc, flags);
irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
- irq_gc_unlock(bgc);
+ irq_gc_unlock_irqrestore(bgc, flags);
return ret;
}
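Both hunks above (aic and aic5) switch from irq_gc_lock() to the irqsave variant: the xlate callback can run with interrupts already disabled, and an unconditional unlock would re-enable them behind the caller's back. Saving and restoring the previous state avoids that. A simplified, purely illustrative sketch of the save/restore idea:

    #include <stdio.h>

    static int irq_enabled;         /* pretend interrupts are already off */

    static void lock_irqsave(unsigned long *flags)
    {
            *flags = (unsigned long)irq_enabled;  /* remember caller's state */
            irq_enabled = 0;                      /* disable */
    }

    static void unlock_irqrestore(unsigned long flags)
    {
            irq_enabled = (int)flags;  /* restore, don't force-enable */
    }

    int main(void)
    {
            unsigned long flags;

            lock_irqsave(&flags);
            /* ... read-modify-write the shared SMR register ... */
            unlock_irqrestore(flags);
            printf("irq_enabled=%d (unchanged)\n", irq_enabled);
            return 0;
    }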
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index ede5672ab34d..da6c0ba61d4f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu)
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
unsigned long cluster_id)
{
- int cpu = *base_cpu;
+ int next_cpu, cpu = *base_cpu;
unsigned long mpidr = cpu_logical_map(cpu);
u16 tlist = 0;
@@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
tlist |= 1 << (mpidr & 0xf);
- cpu = cpumask_next(cpu, mask);
- if (cpu >= nr_cpu_ids)
+ next_cpu = cpumask_next(cpu, mask);
+ if (next_cpu >= nr_cpu_ids)
goto out;
+ cpu = next_cpu;
mpidr = cpu_logical_map(cpu);
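The next_cpu temporary above matters because the old code overwrote cpu with the out-of-range sentinel returned by cpumask_next() before the bounds check, so *base_cpu could be left pointing past the end of the mask. A toy sketch of the corrected iteration (find_next() stands in for cpumask_next()):

    #include <stdio.h>

    #define NR 4

    static int find_next(int cur) { return cur + 1; }  /* toy iterator */

    int main(void)
    {
            int cpu = 2, next_cpu;

            for (;;) {
                    /* ... accumulate cpu into the target list ... */
                    next_cpu = find_next(cpu);
                    if (next_cpu >= NR)
                            break;  /* cpu still holds the last valid id */
                    cpu = next_cpu;
            }
            printf("last valid cpu=%d\n", cpu);  /* 3, not 4 */
            return 0;
    }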
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 83f498393a7f..6185696405d5 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (!gic_local_irq_is_routable(intr))
return -EPERM;
- /*
- * HACK: These are all really percpu interrupts, but the rest
- * of the MIPS kernel code does not use the percpu IRQ API for
- * the CP0 timer and performance counter interrupts.
- */
- switch (intr) {
- case GIC_LOCAL_INT_TIMER:
- case GIC_LOCAL_INT_PERFCTR:
- case GIC_LOCAL_INT_FDC:
- irq_set_chip_and_handler(virq,
- &gic_all_vpes_local_irq_controller,
- handle_percpu_irq);
- break;
- default:
- irq_set_chip_and_handler(virq,
- &gic_local_irq_controller,
- handle_percpu_devid_irq);
- irq_set_percpu_devid(virq);
- break;
- }
-
spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < gic_vpes; i++) {
u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
@@ -724,16 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
return 0;
}
-static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
+static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
+ unsigned int hwirq)
{
- if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
- return gic_local_irq_domain_map(d, virq, hw);
+ struct irq_chip *chip;
+ int err;
+
+ if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+ &gic_level_irq_controller,
+ NULL);
+ } else {
+ switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+ case GIC_LOCAL_INT_TIMER:
+ case GIC_LOCAL_INT_PERFCTR:
+ case GIC_LOCAL_INT_FDC:
+ /*
+ * HACK: These are all really percpu interrupts, but
+ * the rest of the MIPS kernel code does not use the
+ * percpu IRQ API for them.
+ */
+ chip = &gic_all_vpes_local_irq_controller;
+ irq_set_handler(virq, handle_percpu_irq);
+ break;
+
+ default:
+ chip = &gic_local_irq_controller;
+ irq_set_handler(virq, handle_percpu_devid_irq);
+ irq_set_percpu_devid(virq);
+ break;
+ }
- irq_set_chip_and_handler(virq, &gic_level_irq_controller,
- handle_level_irq);
+ err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+ chip, NULL);
+ }
- return gic_shared_irq_domain_map(d, virq, hw, 0);
+ return err;
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -744,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
int cpu, ret, i;
if (spec->type == GIC_DEVICE) {
- /* verify that it doesn't conflict with an IPI irq */
- if (test_bit(spec->hwirq, ipi_resrv))
+ /* verify that shared irqs don't conflict with an IPI irq */
+ if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
+ test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
return -EBUSY;
- hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
-
- return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
- &gic_level_irq_controller,
- NULL);
+ return gic_setup_dev_chip(d, virq, spec->hwirq);
} else {
base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
if (base_hwirq == gic_shared_intrs) {
@@ -821,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
}
static const struct irq_domain_ops gic_irq_domain_ops = {
- .map = gic_irq_domain_map,
.alloc = gic_irq_domain_alloc,
.free = gic_irq_domain_free,
.match = gic_irq_domain_match,
@@ -852,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
struct irq_fwspec *fwspec = arg;
struct gic_irq_spec spec = {
.type = GIC_DEVICE,
- .hwirq = fwspec->param[1],
};
int i, ret;
- bool is_shared = fwspec->param[0] == GIC_SHARED;
- if (is_shared) {
- ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < nr_irqs; i++) {
- irq_hw_number_t hwirq;
+ if (fwspec->param[0] == GIC_SHARED)
+ spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
+ else
+ spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
- if (is_shared)
- hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
- else
- hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+ if (ret)
+ return ret;
- ret = irq_domain_set_hwirq_and_chip(d, virq + i,
- hwirq,
- &gic_level_irq_controller,
- NULL);
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
if (ret)
goto error;
}
@@ -896,7 +888,10 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
static void gic_dev_domain_activate(struct irq_domain *domain,
struct irq_data *d)
{
- gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+ if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
+ gic_local_irq_domain_map(domain, d->irq, d->hwirq);
+ else
+ gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
}
static struct irq_domain_ops gic_dev_domain_ops = {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 67642bacd597..915e84d631a2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7610,16 +7610,12 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
int md_setup_cluster(struct mddev *mddev, int nodes)
{
- int err;
-
- err = request_module("md-cluster");
- if (err) {
- pr_err("md-cluster module not found.\n");
- return -ENOENT;
- }
-
+ if (!md_cluster_ops)
+ request_module("md-cluster");
spin_lock(&pers_lock);
+ /* ensure module won't be unloaded */
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+ pr_err("can't find md-cluster module or get it's reference.\n");
spin_unlock(&pers_lock);
return -ENOENT;
}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 51f76ddbe265..1b1ab4a1d132 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -96,7 +96,6 @@ struct r5l_log {
spinlock_t no_space_stripes_lock;
bool need_cache_flush;
- bool in_teardown;
};
/*
@@ -704,31 +703,22 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
mddev = log->rdev->mddev;
/*
- * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
- * wait for this thread to finish. This thread waits for
- * MD_CHANGE_PENDING clear, which is supposed to be done in
- * md_check_recovery(). md_check_recovery() tries to get
- * reconfig_mutex. Since r5l_quiesce already holds the mutex,
- * md_check_recovery() fails, so the PENDING never get cleared. The
- * in_teardown check workaround this issue.
+ * Discard could zero data, so before discarding we must make sure the
+ * superblock is updated to the new log tail. Updating the superblock
+ * (either by calling md_update_sb() directly or via the md thread)
+ * must hold the reconfig mutex. On the other hand, raid5_quiesce is
+ * called with the reconfig_mutex held, and its first step is waiting
+ * for all IO to finish, hence waiting for the reclaim thread, while
+ * the reclaim thread is calling this function and waiting for the
+ * reconfig mutex. So there is a deadlock. We work around it with a
+ * trylock. FIXME: we could miss a discard if we can't take the mutex
*/
- if (!log->in_teardown) {
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
- log->in_teardown);
- /*
- * r5l_quiesce could run after in_teardown check and hold
- * mutex first. Superblock might get updated twice.
- */
- if (log->in_teardown)
- md_update_sb(mddev, 1);
- } else {
- WARN_ON(!mddev_is_locked(mddev));
- md_update_sb(mddev, 1);
- }
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ if (!mddev_trylock(mddev))
+ return;
+ md_update_sb(mddev, 1);
+ mddev_unlock(mddev);
/* discard IO error really doesn't matter, ignore it */
if (log->last_checkpoint < end) {
@@ -827,7 +817,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
if (!log || state == 2)
return;
if (state == 0) {
- log->in_teardown = 0;
/*
* This is a special case for hotadd. In suspend, the array has
* no journal. In resume, journal is initialized as well as the
@@ -838,11 +827,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
log->rdev->mddev, "reclaim");
} else if (state == 1) {
- /*
- * at this point all stripes are finished, so io_unit is at
- * least in STRIPE_END state
- */
- log->in_teardown = 1;
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
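The trylock above breaks the lock-order cycle described in the new comment, at the cost of occasionally skipping the superblock update (hence the FIXME). A minimal userspace analogue using pthreads (build with -pthread; all names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t reconfig_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void write_super_and_discard(void)
    {
            if (pthread_mutex_trylock(&reconfig_mutex) != 0) {
                    /* lock held elsewhere: skip; we may miss one discard */
                    printf("mutex busy, skipping superblock update\n");
                    return;
            }
            printf("superblock updated\n");
            pthread_mutex_unlock(&reconfig_mutex);
    }

    int main(void)
    {
            write_super_and_discard();  /* uncontended here, so it updates */
            return 0;
    }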
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index da583bb43c84..ee7fc3701700 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2423,10 +2423,10 @@ static void raid5_end_read_request(struct bio * bi)
}
}
rdev_dec_pending(rdev, conf->mddev);
+ bio_reset(bi);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
- bio_reset(bi);
}
static void raid5_end_write_request(struct bio *bi)
@@ -2498,6 +2498,7 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && bi->bi_error && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
+ bio_reset(bi);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -2505,7 +2506,6 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && sh != sh->batch_head)
raid5_release_stripe(sh->batch_head);
- bio_reset(bi);
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous)
@@ -6639,6 +6639,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
}
conf->min_nr_stripes = NR_STRIPES;
+ if (mddev->reshape_position != MaxSector) {
+ int stripes = max_t(int,
+ ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
+ ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
+ conf->min_nr_stripes = max(NR_STRIPES, stripes);
+ if (conf->min_nr_stripes != NR_STRIPES)
+ printk(KERN_INFO
+ "md/raid:%s: force stripe size %d for reshape\n",
+ mdname(mddev), conf->min_nr_stripes);
+ }
memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
diff --git a/drivers/media/cec-edid.c b/drivers/media/cec-edid.c
index 70018247bdda..5719b991e340 100644
--- a/drivers/media/cec-edid.c
+++ b/drivers/media/cec-edid.c
@@ -70,7 +70,10 @@ static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
u8 tag = edid[i] >> 5;
u8 len = edid[i] & 0x1f;
- if (tag == 3 && len >= 5 && i + len <= end)
+ if (tag == 3 && len >= 5 && i + len <= end &&
+ edid[i + 1] == 0x03 &&
+ edid[i + 2] == 0x0c &&
+ edid[i + 3] == 0x00)
return i + 4;
i += len + 1;
} while (i < end);
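The extra byte comparisons above ensure the block is not merely any vendor-specific data block (tag 3) but the HDMI one, whose 24-bit IEEE OUI 0x000C03 is stored least-significant byte first (03 0c 00); only then does offset 4 hold the source physical address. Standalone sketch of the match:

    #include <stdint.h>
    #include <stdio.h>

    static int is_hdmi_vsdb(const uint8_t *b)
    {
            return (b[0] >> 5) == 3 && (b[0] & 0x1f) >= 5 &&
                   b[1] == 0x03 && b[2] == 0x0c && b[3] == 0x00;
    }

    int main(void)
    {
            /* tag 3, length 5, HDMI OUI, physical address 1.0.0.0 */
            const uint8_t blk[] = { 0x65, 0x03, 0x0c, 0x00, 0x10, 0x00 };

            if (is_hdmi_vsdb(blk))
                    printf("SPA = %x.%x.%x.%x\n", blk[4] >> 4, blk[4] & 0xf,
                           blk[5] >> 4, blk[5] & 0xf);
            return 0;
    }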
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index efec2d1a7afd..4d080da7afaf 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1552,6 +1552,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index db987e5b93eb..59a4b5f7724e 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1238,6 +1238,7 @@ static int dvb_init(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
ret = vb2_queue_init(q);
if (ret) {
vb2_dvb_dealloc_frontends(&dev->frontends);
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index ca417a454d67..791a5161809b 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -295,6 +295,7 @@ static int empress_init(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err)
return err;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f25344bc7912..552b635cfce7 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -169,7 +169,7 @@ config VIDEO_MEDIATEK_VPU
config VIDEO_MEDIATEK_VCODEC
tristate "Mediatek Video Codec driver"
depends on MTK_IOMMU || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on ARCH_MEDIATEK || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 94f0a425be42..3a8e6958adae 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -23,7 +23,6 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
-#include "mtk_vcodec_util.h"
#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 3ed3f2d31df5..2c5719ac23b2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -487,7 +487,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct mtk_q_data *q_data;
int ret, i;
struct mtk_video_fmt *fmt;
- unsigned int pitch_w_div16;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
@@ -530,15 +529,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
q_data->coded_width = f->fmt.pix_mp.width;
q_data->coded_height = f->fmt.pix_mp.height;
- pitch_w_div16 = DIV_ROUND_UP(q_data->visible_width, 16);
- if (pitch_w_div16 % 8 != 0) {
- /* Adjust returned width/height, so application could correctly
- * allocate hw required memory
- */
- q_data->visible_height += 32;
- vidioc_try_fmt(f, q_data->fmt);
- }
-
q_data->field = f->fmt.pix_mp.field;
ctx->colorspace = f->fmt.pix_mp.colorspace;
ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
@@ -878,7 +868,8 @@ static int mtk_venc_encode_header(void *priv)
{
struct mtk_vcodec_ctx *ctx = priv;
int ret;
- struct vb2_buffer *dst_buf;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
struct mtk_vcodec_mem bs_buf;
struct venc_done_result enc_result;
@@ -911,6 +902,15 @@ static int mtk_venc_encode_header(void *priv)
mtk_v4l2_err("venc_if_encode failed=%d", ret);
return -EINVAL;
}
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf) {
+ src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+ } else {
+ mtk_v4l2_err("No timestamp for the header buffer.");
+ }
ctx->state = MTK_STATE_HEADER;
dst_buf->planes[0].bytesused = enc_result.bs_size;
@@ -1003,7 +1003,7 @@ static void mtk_venc_worker(struct work_struct *work)
struct mtk_vcodec_mem bs_buf;
struct venc_done_result enc_result;
int ret, i;
- struct vb2_v4l2_buffer *vb2_v4l2;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
/* check dst_buf, dst_buf may be removed in device_run
* to store the encode header, so we need to check dst_buf and
@@ -1043,9 +1043,14 @@ static void mtk_venc_worker(struct work_struct *work)
ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
&frm_buf, &bs_buf, &enc_result);
- vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+ src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+
if (enc_result.is_key_frm)
- vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
if (ret) {
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
@@ -1217,7 +1222,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
- 0, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
+ 0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
@@ -1288,5 +1293,10 @@ int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
{
- venc_if_deinit(ctx);
+ int ret = venc_if_deinit(ctx);
+
+ if (ret)
+ mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+
+ ctx->state = MTK_STATE_FREE;
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index c7806ecda2dd..5cd2151431bf 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -218,11 +218,15 @@ static int fops_vcodec_release(struct file *file)
mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
+ /*
+ * Call v4l2_m2m_ctx_release to make sure the worker thread is not
+ * running after venc_if_deinit.
+ */
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_enc_release(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
list_del_init(&ctx->list);
dev->num_instances--;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
index 33e890f5aa9c..12131855b46a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
@@ -16,7 +16,6 @@
#define _MTK_VCODEC_INTR_H_
#define MTK_INST_IRQ_RECEIVED 0x1
-#define MTK_INST_WORK_THREAD_ABORT_DONE 0x2
struct mtk_vcodec_ctx;
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index 9a600525b3c1..63d4be4ff327 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -61,6 +61,8 @@ enum venc_h264_bs_mode {
/*
* struct venc_h264_vpu_config - Structure for h264 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* @input_fourcc: input fourcc
* @bitrate: target bitrate (in bps)
* @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -94,13 +96,13 @@ struct venc_h264_vpu_config {
/*
* struct venc_h264_vpu_buf - Structure for buffer information
- * @align: buffer alignment (in bytes)
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* @iova: IO virtual address
* @vpua: VPU side memory addr which is used by RC_CODE
* @size: buffer size (in bytes)
*/
struct venc_h264_vpu_buf {
- u32 align;
u32 iova;
u32 vpua;
u32 size;
@@ -108,6 +110,8 @@ struct venc_h264_vpu_buf {
/*
* struct venc_h264_vsi - Structure for VPU driver control and info share
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* This structure is allocated in VPU side and shared to AP side.
* @config: h264 encoder configuration
* @work_bufs: working buffer information in VPU side
@@ -150,12 +154,6 @@ struct venc_h264_inst {
struct mtk_vcodec_ctx *ctx;
};
-static inline void h264_write_reg(struct venc_h264_inst *inst, u32 addr,
- u32 val)
-{
- writel(val, inst->hw_base + addr);
-}
-
static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
{
return readl(inst->hw_base + addr);
@@ -214,6 +212,8 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
return 40;
case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
default:
mtk_vcodec_debug(inst, "unsupported level %d", level);
return 31;
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index 60bbcd2a0510..6d9758479f9a 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -56,6 +56,8 @@ enum venc_vp8_vpu_work_buf {
/*
* struct venc_vp8_vpu_config - Structure for vp8 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* @input_fourcc: input fourcc
* @bitrate: target bitrate (in bps)
* @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -83,14 +85,14 @@ struct venc_vp8_vpu_config {
};
/*
- * struct venc_vp8_vpu_buf -Structure for buffer information
- * @align: buffer alignment (in bytes)
+ * struct venc_vp8_vpu_buf - Structure for buffer information
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* @iova: IO virtual address
* @vpua: VPU side memory addr which is used by RC_CODE
* @size: buffer size (in bytes)
*/
struct venc_vp8_vpu_buf {
- u32 align;
u32 iova;
u32 vpua;
u32 size;
@@ -98,6 +100,8 @@ struct venc_vp8_vpu_buf {
/*
* struct venc_vp8_vsi - Structure for VPU driver control and info share
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* This structure is allocated in VPU side and shared to AP side.
* @config: vp8 encoder configuration
* @work_bufs: working buffer information in VPU side
@@ -138,12 +142,6 @@ struct venc_vp8_inst {
struct mtk_vcodec_ctx *ctx;
};
-static inline void vp8_enc_write_reg(struct venc_vp8_inst *inst, u32 addr,
- u32 val)
-{
- writel(val, inst->hw_base + addr);
-}
-
static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
{
return readl(inst->hw_base + addr);
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 6a7bcc3028b1..bc50c69ee0c5 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -99,10 +99,16 @@ EXPORT_SYMBOL_GPL(rcar_fcp_put);
*/
int rcar_fcp_enable(struct rcar_fcp_device *fcp)
{
+ int error;
+
if (!fcp)
return 0;
- return pm_runtime_get_sync(fcp->dev);
+ error = pm_runtime_get_sync(fcp->dev);
+ if (error < 0)
+ return error;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(rcar_fcp_enable);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 32380d5d4f6b..767af2026f8b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
- dev_info(&slot->mmc->class_dev,
- "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
- slot->id, host->bus_hz, clock,
- div ? ((host->bus_hz / div) >> 1) :
- host->bus_hz, div);
+ if (clock != slot->__clk_old || force_clkinit)
+ dev_info(&slot->mmc->class_dev,
+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+ slot->id, host->bus_hz, clock,
+ div ? ((host->bus_hz / div) >> 1) :
+ host->bus_hz, div);
/* disable clock */
mci_writel(host, CLKENA, 0);
@@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
/* inform CIU */
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+ /* keep the last clock value that was requested from core */
+ slot->__clk_old = clock;
}
host->current_speed = clock;
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 9e740bc232a8..e8cd2dec3263 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
* @queue_node: List node for placing this node in the @queue list of
* &struct dw_mci.
* @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old: The last clock value that was requested from core.
+ * Keeping track of this helps us to avoid spamming the console.
* @flags: Random state bits associated with the slot.
* @id: Number of this slot.
* @sdio_id: Number of this slot in the SDIO interrupt registers.
@@ -263,6 +265,7 @@ struct dw_mci_slot {
struct list_head queue_node;
unsigned int clock;
+ unsigned int __clk_old;
unsigned long flags;
#define DW_MMC_CARD_PRESENT 0
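The dev_info() in dw_mci_setup_bus() is now gated on the clock actually changing, with the new __clk_old field acting as a last-reported cache. A sketch of the same log-dedup idiom in plain C:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int clk_old;  /* illustrative, mirrors the new field */

    static void setup_bus(unsigned int clock, bool force)
    {
            if (clock != clk_old || force)
                    printf("Bus speed = %uHz\n", clock);
            /* ... program the divider ... */
            clk_old = clock;  /* remember the last requested rate */
    }

    int main(void)
    {
            setup_bus(400000, false);    /* prints */
            setup_bus(400000, false);    /* silent: same rate as before */
            setup_bus(50000000, false);  /* prints */
            return 0;
    }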
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index f23d65eb070d..be3c49fa7382 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
/* Only reconfigure if we have a different burst size */
if (*bp != burst) {
- struct dma_slave_config cfg;
-
- cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- cfg.src_maxburst = burst;
- cfg.dst_maxburst = burst;
+ struct dma_slave_config cfg = {
+ .src_addr = host->phys_base +
+ OMAP_MMC_REG(host, DATA),
+ .dst_addr = host->phys_base +
+ OMAP_MMC_REG(host, DATA),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .src_maxburst = burst,
+ .dst_maxburst = burst,
+ };
if (dmaengine_slave_config(c, &cfg))
goto use_pio;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 24ebc9a8de89..5f2f24a7360d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
- struct dma_slave_config cfg;
struct dma_async_tx_descriptor *tx;
int ret = 0, i;
struct mmc_data *data = req->data;
struct dma_chan *chan;
+ struct dma_slave_config cfg = {
+ .src_addr = host->mapbase + OMAP_HSMMC_DATA,
+ .dst_addr = host->mapbase + OMAP_HSMMC_DATA,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = data->blksz / 4,
+ .dst_maxburst = data->blksz / 4,
+ };
/* Sanity check: all the SG entries must be aligned by block size. */
for (i = 0; i < data->sg_len; i++) {
@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
chan = omap_hsmmc_get_dma_chan(host, data);
- cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
- cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.src_maxburst = data->blksz / 4;
- cfg.dst_maxburst = data->blksz / 4;
-
ret = dmaengine_slave_config(chan, &cfg);
if (ret)
return ret;
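The switch to a designated initializer above is not just cosmetic: fields of dma_slave_config that are not named in the initializer are guaranteed to be zero, whereas the old field-by-field assignment left any member it forgot containing stack garbage. A small demonstration with a made-up struct:

    #include <stdio.h>

    struct slave_config {
            unsigned src_maxburst;
            unsigned dst_maxburst;
            int slave_id;  /* imagine this member was added later */
    };

    int main(void)
    {
            struct slave_config cfg = {
                    .src_maxburst = 16,
                    .dst_maxburst = 16,
                    /* .slave_id not named: guaranteed zero, not garbage */
            };

            printf("slave_id=%d\n", cfg.slave_id);  /* always 0 */
            return 0;
    }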
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index c95ba83366a0..ed92ce729dde 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -28,6 +28,7 @@
struct st_mmc_platform_data {
struct reset_control *rstc;
+ struct clk *icnclk;
void __iomem *top_ioaddr;
};
@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct st_mmc_platform_data *pdata;
struct sdhci_pltfm_host *pltfm_host;
- struct clk *clk;
+ struct clk *clk, *icnclk;
int ret = 0;
u16 host_version;
struct resource *res;
@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
+ /* ICN clock isn't compulsory, but use it if it's provided. */
+ icnclk = devm_clk_get(&pdev->dev, "icn");
+ if (IS_ERR(icnclk))
+ icnclk = NULL;
+
rstc = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(rstc))
rstc = NULL;
@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
}
clk_prepare_enable(clk);
+ clk_prepare_enable(icnclk);
/* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
}
pltfm_host->clk = clk;
+ pdata->icnclk = icnclk;
/* Configure the Arasan HC inside the flashSS */
st_mmcss_cconfig(np, host);
@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
return 0;
err_out:
+ clk_disable_unprepare(icnclk);
clk_disable_unprepare(clk);
err_of:
sdhci_pltfm_free(pdev);
@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev)
ret = sdhci_pltfm_unregister(pdev);
+ clk_disable_unprepare(pdata->icnclk);
+
if (rstc)
reset_control_assert(rstc);
@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev)
if (pdata->rstc)
reset_control_assert(pdata->rstc);
+ clk_disable_unprepare(pdata->icnclk);
clk_disable_unprepare(pltfm_host->clk);
out:
return ret;
@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev)
struct device_node *np = dev->of_node;
clk_prepare_enable(pltfm_host->clk);
+ clk_prepare_enable(pdata->icnclk);
if (pdata->rstc)
reset_control_deassert(pdata->rstc);
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index cc07ba0f044d..27fa8b87cd5f 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
unsigned long flags;
u32 val;
+ /* Reset ECC hardware */
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
spin_lock_irqsave(&davinci_nand_lock, flags);
/* Start 4-bit ECC calculation for read/write */
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index 25a4fbd4d24a..d54f666417e1 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
u8 *data, u32 bytes)
{
dma_addr_t addr;
- u32 *p, len, i;
+ u8 *p;
+ u32 len, i, val;
int ret = 0;
addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
@@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
- p = (u32 *)(data + bytes);
+ p = data + bytes;
/* write the parity bytes generated by the ECC back to the OOB region */
- for (i = 0; i < len; i++)
- p[i] = readl(ecc->regs + ECC_ENCPAR(i));
+ for (i = 0; i < len; i++) {
+ if ((i % 4) == 0)
+ val = readl(ecc->regs + ECC_ENCPAR(i / 4));
+ p[i] = (val >> ((i % 4) * 8)) & 0xff;
+ }
timeout:
dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
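The rewritten parity copy above reads each 32-bit ECC_ENCPAR register once per four output bytes and stores the bytes individually, so a parity length that is not a multiple of four writes exactly len bytes instead of overrunning the OOB buffer with whole-word stores. Userspace sketch (read_reg() stands in for readl()):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_reg(unsigned idx)  /* stand-in for readl() */
    {
            return 0xddccbbaa + idx;
    }

    int main(void)
    {
            uint8_t oob[7];  /* len not a multiple of 4 */
            uint32_t val = 0;
            unsigned i, len = sizeof(oob);

            for (i = 0; i < len; i++) {
                    if ((i % 4) == 0)
                            val = read_reg(i / 4);
                    oob[i] = (val >> ((i % 4) * 8)) & 0xff;
            }
            for (i = 0; i < len; i++)
                    printf("%02x ", oob[i]);
            printf("\n");
            return 0;
    }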
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index ddaa2acb9dd7..5223a2182ee4 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -93,6 +93,9 @@
#define NFI_FSM_MASK (0xf << 16)
#define NFI_ADDRCNTR (0x70)
#define CNTR_MASK GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT (12)
+#define ADDRCNTR_SEC(val) \
+ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
#define NFI_STRADDR (0x80)
#define NFI_BYTELEN (0x84)
#define NFI_CSEL (0x90)
@@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
}
ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
- (reg & CNTR_MASK) >= chip->ecc.steps,
+ ADDRCNTR_SEC(reg) >= chip->ecc.steps,
10, MTK_TIMEOUT);
if (ret)
dev_err(dev, "hwecc write timeout\n");
@@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
dev_warn(nfc->dev, "read ahb/dma done timeout\n");
rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
- (reg & CNTR_MASK) >= sectors, 10,
+ ADDRCNTR_SEC(reg) >= sectors, 10,
MTK_TIMEOUT);
if (rc < 0) {
dev_err(nfc->dev, "subpage done timeout\n");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 5173fadc9a4e..57cbe2b83849 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
- if (section > nand_chip->ecc.steps)
+ if (section >= nand_chip->ecc.steps)
return -ERANGE;
if (!section) {
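The comparison fix above is a classic off-by-one: valid sections run from 0 to steps - 1, so section == steps must be rejected as well. Trivial sketch:

    #include <stdio.h>

    #define ERANGE 34

    static int check_section(int section, int steps)
    {
            if (section >= steps)  /* was: section > steps, off by one */
                    return -ERANGE;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_section(4, 4));  /* -34: correctly rejected */
            return 0;
    }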
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index a59361c36f40..5513bfd9cdc9 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -2169,7 +2169,7 @@ scan_tail:
return 0;
return_error:
- if (info->dma)
+ if (!IS_ERR_OR_NULL(info->dma))
dma_release_channel(info->dma);
if (nand_chip->ecc.priv) {
nand_bch_free(nand_chip->ecc.priv);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c5415b05ea9..95c32f2d7601 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -149,6 +149,8 @@ config IPVLAN
tristate "IP-VLAN support"
depends on INET
depends on IPV6
+ depends on NETFILTER
+ depends on NET_L3_MASTER_DEV
---help---
This allows one to create virtual devices off of a main interface
and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index e21f7cc5ae4d..8d6208c0b400 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
@@ -501,9 +502,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
/*
* CAN device restart for bus-off recovery
*/
-static void can_restart(unsigned long data)
+static void can_restart(struct net_device *dev)
{
- struct net_device *dev = (struct net_device *)data;
struct can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
@@ -543,6 +543,14 @@ restart:
netdev_err(dev, "Error %d during restart", err);
}
+static void can_restart_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
+
+ can_restart(priv->dev);
+}
+
int can_restart_now(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -556,8 +564,8 @@ int can_restart_now(struct net_device *dev)
if (priv->state != CAN_STATE_BUS_OFF)
return -EBUSY;
- /* Runs as soon as possible in the timer context */
- mod_timer(&priv->restart_timer, jiffies);
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_restart(dev);
return 0;
}
@@ -578,8 +586,8 @@ void can_bus_off(struct net_device *dev)
netif_carrier_off(dev);
if (priv->restart_ms)
- mod_timer(&priv->restart_timer,
- jiffies + (priv->restart_ms * HZ) / 1000);
+ schedule_delayed_work(&priv->restart_work,
+ msecs_to_jiffies(priv->restart_ms));
}
EXPORT_SYMBOL_GPL(can_bus_off);
@@ -688,6 +696,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
return NULL;
priv = netdev_priv(dev);
+ priv->dev = dev;
if (echo_skb_max) {
priv->echo_skb_max = echo_skb_max;
@@ -697,7 +706,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
priv->state = CAN_STATE_STOPPED;
- init_timer(&priv->restart_timer);
+ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
return dev;
}
@@ -778,8 +787,6 @@ int open_candev(struct net_device *dev)
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
- setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
-
return 0;
}
EXPORT_SYMBOL_GPL(open_candev);
@@ -794,7 +801,7 @@ void close_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- del_timer_sync(&priv->restart_timer);
+ cancel_delayed_work_sync(&priv->restart_work);
can_flush_echo_skb(dev);
}
EXPORT_SYMBOL_GPL(close_candev);
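The conversion above moves the restart handler out of a timer callback (softirq context, must not sleep) into a delayed work item (process context, may sleep, and cancellable with cancel_delayed_work_sync()). A toy sketch of the shape of that conversion; the single-slot queue stands in for schedule_delayed_work():

    #include <stdio.h>

    struct work { void (*fn)(struct work *); };

    static void can_restart_work(struct work *w)
    {
            (void)w;
            /* safe to sleep or allocate here, unlike in a timer callback */
            printf("restarting device\n");
    }

    static struct work *pending;

    static void schedule_work_stub(struct work *w) { pending = w; }
    static void run_pending(void) { if (pending) pending->fn(pending); }

    int main(void)
    {
            struct work restart = { .fn = can_restart_work };

            schedule_work_stub(&restart);  /* ~ schedule_delayed_work() */
            run_pending();                 /* the kernel's worker thread */
            return 0;
    }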
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 41c0fc9f3b14..16f7cadda5c3 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = flexcan_chip_disable(priv);
- if (err)
- return err;
-
if (netif_running(dev)) {
+ err = flexcan_chip_disable(priv);
+ if (err)
+ return err;
netif_stop_queue(dev);
netif_device_detach(dev);
}
@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ return err;
}
- return flexcan_chip_enable(priv);
+ return 0;
}
static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 2d1d22eec750..368bb0710d8f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -81,6 +81,10 @@
#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15)
#define IFI_CANFD_TDELAY 0x1c
+#define IFI_CANFD_TDELAY_DEFAULT 0xb
+#define IFI_CANFD_TDELAY_MASK 0x3fff
+#define IFI_CANFD_TDELAY_ABS BIT(14)
+#define IFI_CANFD_TDELAY_EN BIT(15)
#define IFI_CANFD_ERROR 0x20
#define IFI_CANFD_ERROR_TX_OFFSET 0
@@ -641,7 +645,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
- u16 brp, sjw, tseg1, tseg2;
+ u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
brp = bt->brp - 2;
@@ -664,6 +668,11 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
(sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_FTIME);
+
+ /* Configure transmitter delay */
+ tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
+ writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
+ priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0afc2e5a04dc..7717b19dc806 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -764,11 +764,6 @@ static int b53_get_sset_count(struct dsa_switch *ds)
return b53_get_mib_size(dev);
}
-static int b53_set_addr(struct dsa_switch *ds, u8 *addr)
-{
- return 0;
-}
-
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -1407,16 +1402,12 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
}
}
-static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
- u8 state)
+static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
struct b53_device *dev = ds->priv;
- u8 hw_state, cur_hw_state;
+ u8 hw_state;
u8 reg;
- b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
- cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK;
-
switch (state) {
case BR_STATE_DISABLED:
hw_state = PORT_CTRL_DIS_STATE;
@@ -1438,26 +1429,20 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
return;
}
- /* Fast-age ARL entries if we are moving a port from Learning or
- * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
- * state (hw_state)
- */
- if (cur_hw_state != hw_state) {
- if (cur_hw_state >= PORT_CTRL_LEARN_STATE &&
- hw_state <= PORT_CTRL_LISTEN_STATE) {
- if (b53_fast_age_port(dev, port)) {
- dev_err(ds->dev, "fast ageing failed\n");
- return;
- }
- }
- }
-
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
reg &= ~PORT_CTRL_STP_STATE_MASK;
reg |= hw_state;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
+static void b53_br_fast_age(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (b53_fast_age_port(dev, port))
+ dev_err(ds->dev, "fast ageing failed\n");
+}
+
static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
{
return DSA_TAG_PROTO_NONE;
@@ -1466,7 +1451,6 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
static struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
- .set_addr = b53_set_addr,
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
.get_sset_count = b53_get_sset_count,
@@ -1478,6 +1462,7 @@ static struct dsa_switch_ops b53_switch_ops = {
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_stp_state_set = b53_br_set_stp_state,
+ .port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
.port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
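Reviewer note: the fast-ageing heuristic leaves b53_br_set_stp_state() because DSA now provides a dedicated .port_fast_age hook, reducing the driver's part to a single ARL flush. A simplified sketch of the core-side contract this relies on, as I read it (illustrative pseudocode, not the actual DSA core): ports are fast-aged when they leave a state in which addresses could have been learned.

#include <linux/if_bridge.h>
#include <net/dsa.h>

static void demo_stp_transition(struct dsa_switch *ds, int port,
				u8 old_state, u8 new_state)
{
	ds->ops->port_stp_state_set(ds, port, new_state);

	/* leaving Learning/Forwarding for a blocked state: age out */
	if ((old_state == BR_STATE_LEARNING ||
	     old_state == BR_STATE_FORWARDING) &&
	    (new_state == BR_STATE_DISABLED ||
	     new_state == BR_STATE_BLOCKING ||
	     new_state == BR_STATE_LISTENING) &&
	    ds->ops->port_fast_age)
		ds->ops->port_fast_age(ds, port);
}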
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 697103934317..10ce820daa48 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
+mv88e6xxx-objs += global1.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
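Reviewer note: chip.c starts delegating Global 1 register accessors to a new global1.c, so global1.o joins the mv88e6xxx.o composite object unconditionally, while global2.o remains optional behind CONFIG_NET_DSA_MV88E6XXX_GLOBAL2.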
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 70a812d159c9..883fd9809dd2 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -31,6 +31,7 @@
#include <net/switchdev.h>
#include "mv88e6xxx.h"
+#include "global1.h"
#include "global2.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
@@ -97,7 +98,7 @@ static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip,
return 0;
}
-static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = {
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_single_chip_ops = {
.read = mv88e6xxx_smi_single_chip_read,
.write = mv88e6xxx_smi_single_chip_write,
};
@@ -179,7 +180,7 @@ static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip,
return 0;
}
-static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = {
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_multi_chip_ops = {
.read = mv88e6xxx_smi_multi_chip_read,
.write = mv88e6xxx_smi_multi_chip_write,
};
@@ -216,15 +217,31 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
return 0;
}
+static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 *val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val)
{
int addr = phy; /* PHY devices addresses start at 0x0 */
- if (!chip->phy_ops)
+ if (!chip->info->ops->phy_read)
return -EOPNOTSUPP;
- return chip->phy_ops->read(chip, addr, reg, val);
+ return chip->info->ops->phy_read(chip, addr, reg, val);
}
static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
@@ -232,10 +249,10 @@ static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
{
int addr = phy; /* PHY devices addresses start at 0x0 */
- if (!chip->phy_ops)
+ if (!chip->info->ops->phy_write)
return -EOPNOTSUPP;
- return chip->phy_ops->write(chip, addr, reg, val);
+ return chip->info->ops->phy_write(chip, addr, reg, val);
}
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
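Reviewer note: two related refactors land in the hunks above. First, mv88e6xxx_port_read()/write() centralize the (port, reg) to SMI (addr, reg) translation, since port registers occupy consecutive SMI addresses starting at port_base_addr; note the new helpers drop the num_ports bounds check that the old copy (removed further down in this patch) carried. Second, PHY access now dispatches through the per-chip info->ops table instead of a chip->phy_ops pointer. The accessor idea in isolation (illustrative names, not driver code):

#include <linux/types.h>

struct demo_chip {
	int port_base_addr;
};

/* stands in for the real SMI read path */
static int demo_read(struct demo_chip *chip, int addr, int reg, u16 *val);

static int demo_port_read(struct demo_chip *chip, int port, int reg, u16 *val)
{
	/* port N's registers sit at SMI address port_base_addr + N */
	return demo_read(chip, chip->port_base_addr + port, reg, val);
}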
@@ -345,46 +362,27 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
return mv88e6xxx_write(chip, addr, reg, val);
}
-static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg)
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
{
u16 val;
- int err;
+ int i, err;
- err = mv88e6xxx_read(chip, addr, reg, &val);
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
if (err)
return err;
- return val;
-}
-
-static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 val)
-{
- return mv88e6xxx_write(chip, addr, reg, val);
-}
-
-static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
-{
- int ret;
- int i;
-
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
- ret & ~GLOBAL_CONTROL_PPU_ENABLE);
- if (ret)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
+ val & ~GLOBAL_CONTROL_PPU_ENABLE);
+ if (err)
+ return err;
for (i = 0; i < 16; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ if (err)
+ return err;
usleep_range(1000, 2000);
- if ((ret & GLOBAL_STATUS_PPU_MASK) !=
- GLOBAL_STATUS_PPU_POLLING)
+ if ((val & GLOBAL_STATUS_PPU_MASK) != GLOBAL_STATUS_PPU_POLLING)
return 0;
}
@@ -393,25 +391,25 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
{
- int ret, err, i;
+ u16 val;
+ int i, err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_CONTROL, &val);
+ if (err)
+ return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
- ret | GLOBAL_CONTROL_PPU_ENABLE);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL,
+ val | GLOBAL_CONTROL_PPU_ENABLE);
if (err)
return err;
for (i = 0; i < 16; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATUS, &val);
+ if (err)
+ return err;
usleep_range(1000, 2000);
- if ((ret & GLOBAL_STATUS_PPU_MASK) ==
- GLOBAL_STATUS_PPU_POLLING)
+ if ((val & GLOBAL_STATUS_PPU_MASK) == GLOBAL_STATUS_PPU_POLLING)
return 0;
}
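Reviewer note: the PPU hunks show the conversion applied throughout this patch. The old _mv88e6xxx_reg_read() multiplexed a negative errno and a 16-bit register value onto one int, forcing `ret < 0` tests and masking; the Global 1 helpers return only an error and hand the value back through a u16 out-parameter, so every check becomes a uniform `if (err)` and the value keeps its natural type. Side by side (an illustrative sketch; demo_old_read()/demo_g1_read() stand in for the real accessors):

#include <linux/types.h>

struct demo_chip;

int demo_old_read(struct demo_chip *chip, int addr, int reg);
int demo_g1_read(struct demo_chip *chip, int reg, u16 *val);

/* old: errno and data multiplexed onto one int */
static int demo_old_style(struct demo_chip *chip, int addr, int reg)
{
	int ret = demo_old_read(chip, addr, reg);

	if (ret < 0)
		return ret;
	return ret & 0xffff;	/* the register value */
}

/* new: error and value travel separately */
static int demo_new_style(struct demo_chip *chip, int reg)
{
	u16 val;
	int err = demo_g1_read(chip, reg, &val);

	if (err)
		return err;
	return val;
}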
@@ -517,11 +515,6 @@ static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr,
return err;
}
-static const struct mv88e6xxx_ops mv88e6xxx_phy_ppu_ops = {
- .read = mv88e6xxx_phy_ppu_read,
- .write = mv88e6xxx_phy_ppu_write,
-};
-
static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip)
{
return chip->info->family == MV88E6XXX_FAMILY_6065;
@@ -562,21 +555,6 @@ static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip)
return chip->info->family == MV88E6XXX_FAMILY_6352;
}
-static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
-{
- return chip->info->num_databases;
-}
-
-static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip)
-{
- /* Does the device have dedicated FID registers for ATU and VTU ops? */
- if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip))
- return true;
-
- return false;
-}
-
/* We expect the switch to perform auto negotiation if there is a real
* phy. However, in the case of a fixed link phy, we force the port
* settings from the fixed link settings.
@@ -585,23 +563,23 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct mv88e6xxx_chip *chip = ds->priv;
- u32 reg;
- int ret;
+ u16 reg;
+ int err;
if (!phy_is_pseudo_fixed_link(phydev))
return;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ if (err)
goto out;
- reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
- PORT_PCS_CTRL_FORCE_LINK |
- PORT_PCS_CTRL_DUPLEX_FULL |
- PORT_PCS_CTRL_FORCE_DUPLEX |
- PORT_PCS_CTRL_UNFORCED);
+ reg &= ~(PORT_PCS_CTRL_LINK_UP |
+ PORT_PCS_CTRL_FORCE_LINK |
+ PORT_PCS_CTRL_DUPLEX_FULL |
+ PORT_PCS_CTRL_FORCE_DUPLEX |
+ PORT_PCS_CTRL_UNFORCED);
reg |= PORT_PCS_CTRL_FORCE_LINK;
if (phydev->link)
@@ -630,7 +608,7 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= PORT_PCS_CTRL_DUPLEX_FULL;
if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) &&
- (port >= chip->info->num_ports - 2)) {
+ (port >= mv88e6xxx_num_ports(chip) - 2)) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -639,7 +617,7 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
}
- _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg);
+ mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
out:
mutex_unlock(&chip->reg_lock);
@@ -647,12 +625,12 @@ out:
static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
{
- int ret;
- int i;
+ u16 val;
+ int i, err;
for (i = 0; i < 10; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP);
- if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_OP, &val);
+ if ((val & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
@@ -661,55 +639,52 @@ static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
- int ret;
+ int err;
if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_CAPTURE_PORT |
- GLOBAL_STATS_OP_HIST_RX_TX | port);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT |
+ GLOBAL_STATS_OP_HIST_RX_TX | port);
+ if (err)
+ return err;
/* Wait for the snapshotting to complete. */
- ret = _mv88e6xxx_stats_wait(chip);
- if (ret < 0)
- return ret;
-
- return 0;
+ return _mv88e6xxx_stats_wait(chip);
}
static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip,
int stat, u32 *val)
{
- u32 _val;
- int ret;
+ u32 value;
+ u16 reg;
+ int err;
*val = 0;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_READ_CAPTURED |
- GLOBAL_STATS_OP_HIST_RX_TX | stat);
- if (ret < 0)
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_READ_CAPTURED |
+ GLOBAL_STATS_OP_HIST_RX_TX | stat);
+ if (err)
return;
- ret = _mv88e6xxx_stats_wait(chip);
- if (ret < 0)
+ err = _mv88e6xxx_stats_wait(chip);
+ if (err)
return;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
- if (ret < 0)
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_32, &reg);
+ if (err)
return;
- _val = ret << 16;
+ value = reg << 16;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
- if (ret < 0)
+ err = mv88e6xxx_g1_read(chip, GLOBAL_STATS_COUNTER_01, &reg);
+ if (err)
return;
- *val = _val | ret;
+ *val = value | reg;
}
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
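Reviewer note: _mv88e6xxx_stats_read() assembles each 32-bit MIB counter from two 16-bit registers, GLOBAL_STATS_COUNTER_32 holding bits 31:16 and GLOBAL_STATS_COUNTER_01 holding bits 15:0; the rewrite preserves that layout under the new err/val convention. The core of it, isolated (illustrative):

#include <linux/types.h>

static u32 demo_stats_value(u16 counter_32, u16 counter_01)
{
	/* the _32 register holds the high half, _01 the low half */
	return ((u32)counter_32 << 16) | counter_01;
}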
@@ -799,22 +774,22 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
{
u32 low;
u32 high = 0;
- int ret;
+ int err;
+ u16 reg;
u64 value;
switch (s->type) {
case PORT:
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
+ if (err)
return UINT64_MAX;
- low = ret;
+ low = reg;
if (s->sizeof_stat == 4) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port),
- s->reg + 1);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
+ if (err)
return UINT64_MAX;
- high = ret;
+ high = reg;
}
break;
case BANK0:
@@ -893,6 +868,8 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+ u16 reg;
u16 *p = _p;
int i;
@@ -903,11 +880,10 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
for (i = 0; i < 32; i++) {
- int ret;
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i);
- if (ret >= 0)
- p[i] = ret;
+ err = mv88e6xxx_port_read(chip, port, i, &reg);
+ if (!err)
+ p[i] = reg;
}
mutex_unlock(&chip->reg_lock);
@@ -915,8 +891,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP,
- GLOBAL_ATU_OP_BUSY);
+ return mv88e6xxx_g1_wait(chip, GLOBAL_ATU_OP, GLOBAL_ATU_OP_BUSY);
}
static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
@@ -938,7 +913,7 @@ static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- err = mv88e6xxx_read(chip, REG_PORT(port), PORT_STATUS, &reg);
+ err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
if (err)
goto out;
@@ -980,32 +955,31 @@ out:
static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
{
- int ret;
+ u16 val;
+ int err;
- if (mv88e6xxx_has_fid_reg(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID,
- fid);
- if (ret < 0)
- return ret;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_ATU_FID)) {
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_FID, fid);
+ if (err)
+ return err;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* ATU DBNum[7:4] are located in ATU Control 15:12 */
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
- (ret & 0xfff) |
- ((fid << 8) & 0xf000));
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
+ (val & 0xfff) | ((fid << 8) & 0xf000));
+ if (err)
+ return err;
/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
cmd |= fid & 0xf;
}
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_OP, cmd);
+ if (err)
+ return err;
return _mv88e6xxx_atu_wait(chip);
}
@@ -1030,7 +1004,7 @@ static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
data |= (entry->portv_trunkid << shift) & mask;
}
- return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_DATA, data);
}
static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
@@ -1106,57 +1080,45 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port,
u8 state)
{
struct dsa_switch *ds = chip->ds;
- int reg, ret = 0;
+ u16 reg;
+ int err;
u8 oldstate;
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL);
- if (reg < 0)
- return reg;
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
oldstate = reg & PORT_CONTROL_STATE_MASK;
- if (oldstate != state) {
- /* Flush forwarding database if we're moving a port
- * from Learning or Forwarding state to Disabled or
- * Blocking or Listening state.
- */
- if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
- oldstate == PORT_CONTROL_STATE_FORWARDING) &&
- (state == PORT_CONTROL_STATE_DISABLED ||
- state == PORT_CONTROL_STATE_BLOCKING)) {
- ret = _mv88e6xxx_atu_remove(chip, 0, port, false);
- if (ret)
- return ret;
- }
+ reg &= ~PORT_CONTROL_STATE_MASK;
+ reg |= state;
- reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL,
- reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ if (err)
+ return err;
- netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
- mv88e6xxx_port_state_names[state],
- mv88e6xxx_port_state_names[oldstate]);
- }
+ netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
+ mv88e6xxx_port_state_names[state],
+ mv88e6xxx_port_state_names[oldstate]);
- return ret;
+ return 0;
}
static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
struct net_device *bridge = chip->ports[port].bridge_dev;
- const u16 mask = (1 << chip->info->num_ports) - 1;
+ const u16 mask = (1 << mv88e6xxx_num_ports(chip)) - 1;
struct dsa_switch *ds = chip->ds;
u16 output_ports = 0;
- int reg;
+ u16 reg;
+ int err;
int i;
/* allow CPU port or DSA link(s) to send frames to every port */
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
output_ports = mask;
} else {
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
/* allow sending frames to every group member */
if (bridge && chip->ports[i].bridge_dev == bridge)
output_ports |= BIT(i);
@@ -1170,14 +1132,14 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
- if (reg < 0)
- return reg;
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
reg &= ~mask;
reg |= output_ports & mask;
- return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg);
+ return mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
}
static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -1214,27 +1176,39 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
mv88e6xxx_port_state_names[stp_state]);
}
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = _mv88e6xxx_atu_remove(chip, 0, port, false);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ netdev_err(ds->ports[port].netdev, "failed to flush ATU\n");
+}
+
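Reviewer note: mirroring the b53 change earlier in this diff, the ATU flush that _mv88e6xxx_port_state() performed on Learning/Forwarding-to-blocked transitions moves into this dedicated .port_fast_age implementation, run under reg_lock with a per-port error message on failure.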
static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port,
u16 *new, u16 *old)
{
struct dsa_switch *ds = chip->ds;
- u16 pvid;
- int ret;
+ u16 pvid, reg;
+ int err;
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_DEFAULT_VLAN, &reg);
+ if (err)
+ return err;
- pvid = ret & PORT_DEFAULT_VLAN_MASK;
+ pvid = reg & PORT_DEFAULT_VLAN_MASK;
if (new) {
- ret &= ~PORT_DEFAULT_VLAN_MASK;
- ret |= *new & PORT_DEFAULT_VLAN_MASK;
+ reg &= ~PORT_DEFAULT_VLAN_MASK;
+ reg |= *new & PORT_DEFAULT_VLAN_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_DEFAULT_VLAN, ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, reg);
+ if (err)
+ return err;
netdev_dbg(ds->ports[port].netdev,
"DefaultVID %d (was %d)\n", *new, pvid);
@@ -1260,17 +1234,16 @@ static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip,
static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP,
- GLOBAL_VTU_OP_BUSY);
+ return mv88e6xxx_g1_wait(chip, GLOBAL_VTU_OP, GLOBAL_VTU_OP_BUSY);
}
static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op)
{
- int ret;
+ int err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_OP, op);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_wait(chip);
}
@@ -1287,23 +1260,21 @@ static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip)
}
static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry,
+ struct mv88e6xxx_vtu_entry *entry,
unsigned int nibble_offset)
{
u16 regs[3];
- int i;
- int ret;
+ int i, err;
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_DATA_0_3 + i);
- if (ret < 0)
- return ret;
+ u16 *reg = &regs[i];
- regs[i] = ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
}
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u16 reg = regs[i / 4];
@@ -1314,26 +1285,25 @@ static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0);
}
static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2);
}
static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry,
+ struct mv88e6xxx_vtu_entry *entry,
unsigned int nibble_offset)
{
u16 regs[3] = { 0 };
- int i;
- int ret;
+ int i, err;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u8 data = entry->data[i];
@@ -1341,86 +1311,85 @@ static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
}
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL,
- GLOBAL_VTU_DATA_0_3 + i, regs[i]);
- if (ret < 0)
- return ret;
+ u16 reg = regs[i];
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_DATA_0_3 + i, reg);
+ if (err)
+ return err;
}
return 0;
}
static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0);
}
static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2);
}
static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid)
{
- return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID,
- vid & GLOBAL_VTU_VID_MASK);
+ return mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID,
+ vid & GLOBAL_VTU_VID_MASK);
}
static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
- struct mv88e6xxx_vtu_stu_entry next = { 0 };
- int ret;
+ struct mv88e6xxx_vtu_entry next = { 0 };
+ u16 val;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
+ if (err)
+ return err;
- next.vid = ret & GLOBAL_VTU_VID_MASK;
- next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+ next.vid = val & GLOBAL_VTU_VID_MASK;
+ next.valid = !!(val & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = mv88e6xxx_vtu_data_read(chip, &next);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_vtu_data_read(chip, &next);
+ if (err)
+ return err;
- if (mv88e6xxx_has_fid_reg(chip)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_FID);
- if (ret < 0)
- return ret;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) {
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_FID, &val);
+ if (err)
+ return err;
- next.fid = ret & GLOBAL_VTU_FID_MASK;
+ next.fid = val & GLOBAL_VTU_FID_MASK;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
* VTU DBNum[3:0] are located in VTU Operation 3:0
*/
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_OP);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_OP, &val);
+ if (err)
+ return err;
- next.fid = (ret & 0xf00) >> 4;
- next.fid |= ret & 0xf;
+ next.fid = (val & 0xf00) >> 4;
+ next.fid |= val & 0xf;
}
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_VTU_SID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
+ if (err)
+ return err;
- next.sid = ret & GLOBAL_VTU_SID_MASK;
+ next.sid = val & GLOBAL_VTU_SID_MASK;
}
}
@@ -1433,7 +1402,7 @@ static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_vtu_stu_entry next;
+ struct mv88e6xxx_vtu_entry next;
u16 pvid;
int err;
@@ -1484,38 +1453,36 @@ unlock:
}
static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
u16 reg = 0;
- int ret;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
if (!entry->valid)
goto loadpurge;
/* Write port member tags */
- ret = mv88e6xxx_vtu_data_write(chip, entry);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_vtu_data_write(chip, entry);
+ if (err)
+ return err;
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
- reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg);
+ if (err)
+ return err;
}
- if (mv88e6xxx_has_fid_reg(chip)) {
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G1_VTU_FID)) {
reg = entry->fid & GLOBAL_VTU_FID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID,
- reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_FID, reg);
+ if (err)
+ return err;
} else if (mv88e6xxx_num_databases(chip) == 256) {
/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
* VTU DBNum[3:0] are located in VTU Operation 3:0
@@ -1527,48 +1494,49 @@ static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
reg |= entry->vid & GLOBAL_VTU_VID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_cmd(chip, op);
}
static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
- struct mv88e6xxx_vtu_stu_entry next = { 0 };
- int ret;
+ struct mv88e6xxx_vtu_entry next = { 0 };
+ u16 val;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
- sid & GLOBAL_VTU_SID_MASK);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID,
+ sid & GLOBAL_VTU_SID_MASK);
+ if (err)
+ return err;
- ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_SID, &val);
+ if (err)
+ return err;
- next.sid = ret & GLOBAL_VTU_SID_MASK;
+ next.sid = val & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_VTU_VID, &val);
+ if (err)
+ return err;
- next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+ next.valid = !!(val & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = mv88e6xxx_stu_data_read(chip, &next);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_stu_data_read(chip, &next);
+ if (err)
+ return err;
}
*entry = next;
@@ -1576,33 +1544,33 @@ static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
}
static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
u16 reg = 0;
- int ret;
+ int err;
- ret = _mv88e6xxx_vtu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_vtu_wait(chip);
+ if (err)
+ return err;
if (!entry->valid)
goto loadpurge;
/* Write port states */
- ret = mv88e6xxx_stu_data_write(chip, entry);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_stu_data_write(chip, entry);
+ if (err)
+ return err;
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_VID, reg);
+ if (err)
+ return err;
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_VTU_SID, reg);
+ if (err)
+ return err;
return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
@@ -1613,7 +1581,8 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
struct dsa_switch *ds = chip->ds;
u16 upper_mask;
u16 fid;
- int ret;
+ u16 reg;
+ int err;
if (mv88e6xxx_num_databases(chip) == 4096)
upper_mask = 0xff;
@@ -1623,37 +1592,35 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
return -EOPNOTSUPP;
/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
- fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
+ fid = (reg & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
if (new) {
- ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
- ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
+ reg &= ~PORT_BASE_VLAN_FID_3_0_MASK;
+ reg |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN,
- ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
+ if (err)
+ return err;
}
/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &reg);
+ if (err)
+ return err;
- fid |= (ret & upper_mask) << 4;
+ fid |= (reg & upper_mask) << 4;
if (new) {
- ret &= ~upper_mask;
- ret |= (*new >> 4) & upper_mask;
+ reg &= ~upper_mask;
+ reg |= (*new >> 4) & upper_mask;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
- ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, reg);
+ if (err)
+ return err;
netdev_dbg(ds->ports[port].netdev,
"FID %d (was %d)\n", *new, fid);
@@ -1680,13 +1647,13 @@ static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip,
static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
err = _mv88e6xxx_port_fid_get(chip, i, fid);
if (err)
return err;
@@ -1722,10 +1689,10 @@ static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
}
static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry)
+ struct mv88e6xxx_vtu_entry *entry)
{
struct dsa_switch *ds = chip->ds;
- struct mv88e6xxx_vtu_stu_entry vlan = {
+ struct mv88e6xxx_vtu_entry vlan = {
.valid = true,
.vid = vid,
};
@@ -1736,14 +1703,14 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
return err;
/* exclude all ports except the CPU and DSA ports */
- for (i = 0; i < chip->info->num_ports; ++i)
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
- struct mv88e6xxx_vtu_stu_entry vstp;
+ struct mv88e6xxx_vtu_entry vstp;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
* implemented, only one STU entry is needed to cover all VTU
@@ -1770,7 +1737,7 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
}
static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
+ struct mv88e6xxx_vtu_entry *entry, bool creat)
{
int err;
@@ -1802,7 +1769,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
if (!vid_begin)
@@ -1825,7 +1792,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (vlan.vid > vid_end)
break;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
@@ -1865,26 +1832,26 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
PORT_CONTROL_2_8021Q_DISABLED;
- int ret;
+ u16 reg;
+ int err;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ if (err)
goto unlock;
- old = ret & PORT_CONTROL_2_8021Q_MASK;
+ old = reg & PORT_CONTROL_2_8021Q_MASK;
if (new != old) {
- ret &= ~PORT_CONTROL_2_8021Q_MASK;
- ret |= new & PORT_CONTROL_2_8021Q_MASK;
+ reg &= ~PORT_CONTROL_2_8021Q_MASK;
+ reg |= new & PORT_CONTROL_2_8021Q_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2,
- ret);
- if (ret < 0)
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ if (err)
goto unlock;
netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n",
@@ -1892,11 +1859,11 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
mv88e6xxx_port_8021q_mode_names[old]);
}
- ret = 0;
+ err = 0;
unlock:
mutex_unlock(&chip->reg_lock);
- return ret;
+ return err;
}
static int
@@ -1927,7 +1894,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
u16 vid, bool untagged)
{
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int err;
err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true);
@@ -1972,7 +1939,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
int port, u16 vid)
{
struct dsa_switch *ds = chip->ds;
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
@@ -1987,7 +1954,7 @@ static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
continue;
@@ -2041,14 +2008,13 @@ unlock:
static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
const unsigned char *addr)
{
- int i, ret;
+ int i, err;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_write(
- chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
- (addr[i * 2] << 8) | addr[i * 2 + 1]);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_MAC_01 + i,
+ (addr[i * 2] << 8) | addr[i * 2 + 1]);
+ if (err)
+ return err;
}
return 0;
@@ -2057,15 +2023,16 @@ static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
unsigned char *addr)
{
- int i, ret;
+ u16 val;
+ int i, err;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
- GLOBAL_ATU_MAC_01 + i);
- if (ret < 0)
- return ret;
- addr[i * 2] = ret >> 8;
- addr[i * 2 + 1] = ret & 0xff;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_MAC_01 + i, &val);
+ if (err)
+ return err;
+
+ addr[i * 2] = val >> 8;
+ addr[i * 2 + 1] = val & 0xff;
}
return 0;
@@ -2091,12 +2058,48 @@ static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+
+static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
+ const u8 *addr, struct mv88e6xxx_atu_entry *entry)
+{
+ struct mv88e6xxx_atu_entry next;
+ int err;
+
+ eth_broadcast_addr(next.mac);
+
+ err = _mv88e6xxx_atu_mac_write(chip, next.mac);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_atu_getnext(chip, fid, &next);
+ if (err)
+ return err;
+
+ if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ break;
+
+ if (ether_addr_equal(next.mac, addr)) {
+ *entry = next;
+ return 0;
+ }
+ } while (!is_broadcast_ether_addr(next.mac));
+
+ memset(entry, 0, sizeof(*entry));
+ entry->fid = fid;
+ ether_addr_copy(entry->mac, addr);
+
+ return 0;
+}
+
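Reviewer note: mv88e6xxx_atu_get() leans on the GetNext semantics of the ATU: entries are iterated in MAC order and the iteration wraps, so seeding the MAC registers with the all-ones broadcast address starts the walk at the lowest entry, and reading back an unused entry or the broadcast address again marks the end. When nothing matches, the function hands the caller a blank template for that FID and MAC to load. The walk skeleton (illustrative; it returns -ENOENT where the driver instead builds the template):

#include <linux/etherdevice.h>

#define DEMO_STATE_UNUSED	0

struct demo_atu_entry {
	u8 mac[ETH_ALEN];
	u8 state;
	u16 portv_trunkid;
};

struct demo_chip;

int demo_atu_mac_write(struct demo_chip *chip, const u8 *mac);
int demo_atu_getnext(struct demo_chip *chip, u16 fid,
		     struct demo_atu_entry *next);

static int demo_atu_find(struct demo_chip *chip, u16 fid, const u8 *addr,
			 struct demo_atu_entry *entry)
{
	struct demo_atu_entry next;
	int err;

	eth_broadcast_addr(next.mac);	/* all-ones MAC starts the walk */
	err = demo_atu_mac_write(chip, next.mac);
	if (err)
		return err;

	do {
		err = demo_atu_getnext(chip, fid, &next);
		if (err)
			return err;
		if (next.state == DEMO_STATE_UNUSED)
			break;			/* walked past the end */
		if (ether_addr_equal(next.mac, addr)) {
			*entry = next;		/* existing entry found */
			return 0;
		}
	} while (!is_broadcast_ether_addr(next.mac));	/* wrapped around */

	return -ENOENT;
}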
static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
const unsigned char *addr, u16 vid,
u8 state)
{
- struct mv88e6xxx_atu_entry entry = { 0 };
- struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_vtu_entry vlan;
+ struct mv88e6xxx_atu_entry entry;
int err;
/* Null VLAN ID corresponds to the port private database */
@@ -2107,12 +2110,18 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- entry.fid = vlan.fid;
- entry.state = state;
- ether_addr_copy(entry.mac, addr);
- if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
- entry.trunk = false;
- entry.portv_trunkid = BIT(port);
+ err = mv88e6xxx_atu_get(chip, vlan.fid, addr, &entry);
+ if (err)
+ return err;
+
+ /* Purge the ATU entry only if no port is using it anymore */
+ if (state == GLOBAL_ATU_DATA_STATE_UNUSED) {
+ entry.portv_trunkid &= ~BIT(port);
+ if (!entry.portv_trunkid)
+ entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ } else {
+ entry.portv_trunkid |= BIT(port);
+ entry.state = state;
}
return _mv88e6xxx_atu_load(chip, &entry);
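Reviewer note: load/purge now treats the ATU entry's port vector as shared, refcount-like state. A purge clears only the caller's bit and invalidates the entry once the vector empties, while a load ORs the bit in instead of overwriting the vector, so one address can be installed for several ports without each load clobbering the previous membership. The update rule on its own (illustrative, reusing the demo_atu_entry sketch above):

#include <linux/bitops.h>

static void demo_portv_update(struct demo_atu_entry *entry, int port,
			      u8 state, bool purge)
{
	if (purge) {
		entry->portv_trunkid &= ~BIT(port);
		if (!entry->portv_trunkid)	/* last referencing port */
			entry->state = DEMO_STATE_UNUSED;
	} else {
		entry->portv_trunkid |= BIT(port);
		entry->state = state;
	}
}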
@@ -2159,31 +2168,32 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
struct mv88e6xxx_atu_entry *entry)
{
struct mv88e6xxx_atu_entry next = { 0 };
- int ret;
+ u16 val;
+ int err;
next.fid = fid;
- ret = _mv88e6xxx_atu_wait(chip);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_wait(chip);
+ if (err)
+ return err;
- ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (err)
+ return err;
- ret = _mv88e6xxx_atu_mac_read(chip, next.mac);
- if (ret < 0)
- return ret;
+ err = _mv88e6xxx_atu_mac_read(chip, next.mac);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_DATA, &val);
+ if (err)
+ return err;
- next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+ next.state = val & GLOBAL_ATU_DATA_STATE_MASK;
if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
unsigned int mask, shift;
- if (ret & GLOBAL_ATU_DATA_TRUNK) {
+ if (val & GLOBAL_ATU_DATA_TRUNK) {
next.trunk = true;
mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
@@ -2193,7 +2203,7 @@ static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
}
- next.portv_trunkid = (ret & mask) >> shift;
+ next.portv_trunkid = (val & mask) >> shift;
}
*entry = next;
@@ -2263,7 +2273,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
struct switchdev_obj *obj,
int (*cb)(struct switchdev_obj *obj))
{
- struct mv88e6xxx_vtu_stu_entry vlan = {
+ struct mv88e6xxx_vtu_entry vlan = {
.vid = GLOBAL_VTU_VID_MASK, /* all ones */
};
u16 fid;
@@ -2325,7 +2335,7 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
/* Assign the bridge and remap each port's VLANTable */
chip->ports[port].bridge_dev = bridge;
- for (i = 0; i < chip->info->num_ports; ++i) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
if (chip->ports[i].bridge_dev == bridge) {
err = _mv88e6xxx_port_based_vlan_map(chip, i);
if (err)
@@ -2349,7 +2359,7 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
/* Unassign the bridge and remap each port's VLANTable */
chip->ports[port].bridge_dev = NULL;
- for (i = 0; i < chip->info->num_ports; ++i)
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
if (i == port || chip->ports[i].bridge_dev == bridge)
if (_mv88e6xxx_port_based_vlan_map(chip, i))
netdev_warn(ds->ports[i].netdev,
@@ -2364,19 +2374,20 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
struct gpio_desc *gpiod = chip->reset;
unsigned long timeout;
- int ret;
+ u16 reg;
+ int err;
int i;
/* Set all ports to the disabled state. */
- for (i = 0; i < chip->info->num_ports; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL);
- if (ret < 0)
- return ret;
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ err = mv88e6xxx_port_read(chip, i, PORT_CONTROL, &reg);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL,
- ret & 0xfffc);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, i, PORT_CONTROL,
+ reg & 0xfffc);
+ if (err)
+ return err;
}
/* Wait for transmit queues to drain. */
@@ -2395,29 +2406,29 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
* through global registers 0x18 and 0x19.
*/
if (ppu_active)
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000);
+ err = mv88e6xxx_g1_write(chip, 0x04, 0xc000);
else
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400);
- if (ret)
- return ret;
+ err = mv88e6xxx_g1_write(chip, 0x04, 0xc400);
+ if (err)
+ return err;
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_g1_read(chip, 0x00, &reg);
+ if (err)
+ return err;
- if ((ret & is_reset) == is_reset)
+ if ((reg & is_reset) == is_reset)
break;
usleep_range(1000, 2000);
}
if (time_after(jiffies, timeout))
- ret = -ETIMEDOUT;
+ err = -ETIMEDOUT;
else
- ret = 0;
+ err = 0;
- return ret;
+ return err;
}
static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
@@ -2438,21 +2449,10 @@ static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
return err;
}
-static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port,
- int reg, u16 *val)
-{
- int addr = chip->info->port_base_addr + port;
-
- if (port >= chip->info->num_ports)
- return -EINVAL;
-
- return mv88e6xxx_read(chip, addr, reg, val);
-}
-
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
- int ret;
+ int err;
u16 reg;
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
@@ -2465,7 +2465,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* and all DSA ports to their maximum bandwidth and
* full duplex.
*/
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
reg &= ~PORT_PCS_CTRL_UNFORCED;
reg |= PORT_PCS_CTRL_FORCE_LINK |
@@ -2480,10 +2480,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
reg |= PORT_PCS_CTRL_UNFORCED;
}
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PCS_CTRL, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ if (err)
+ return err;
}
/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
@@ -2534,26 +2533,25 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (reg) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_CONTROL, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ if (err)
+ return err;
}
/* If this port is connected to a SerDes, make sure the SerDes is not
* powered down.
*/
if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SERDES)) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
- if (ret < 0)
- return ret;
- ret &= PORT_STATUS_CMODE_MASK;
- if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
- (ret == PORT_STATUS_CMODE_1000BASE_X) ||
- (ret == PORT_STATUS_CMODE_SGMII)) {
- ret = mv88e6xxx_serdes_power_on(chip);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ if (err)
+ return err;
+ reg &= PORT_STATUS_CMODE_MASK;
+ if ((reg == PORT_STATUS_CMODE_100BASE_X) ||
+ (reg == PORT_STATUS_CMODE_1000BASE_X) ||
+ (reg == PORT_STATUS_CMODE_SGMII)) {
+ err = mv88e6xxx_serdes_power_on(chip);
+ if (err < 0)
+ return err;
}
}
@@ -2587,10 +2585,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
reg |= PORT_CONTROL_2_8021Q_DISABLED;
if (reg) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_CONTROL_2, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ if (err)
+ return err;
}
/* Port Association Vector: when learning source addresses
@@ -2603,16 +2600,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (dsa_is_cpu_port(ds, port))
reg = 0;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR,
- reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_ASSOC_VECTOR, reg);
+ if (err)
+ return err;
/* Egress rate control 2: disable egress rate control. */
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2,
- 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL_2, 0x0000);
+ if (err)
+ return err;
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
@@ -2621,114 +2616,108 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* be paused for by the remote end or the period of
* time that this port can pause the remote end.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PAUSE_CTRL, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL, 0x0000);
+ if (err)
+ return err;
/* Port ATU control: disable limiting the number of
* address database entries that this port is allowed
* to use.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_ATU_CONTROL, 0x0000);
+ err = mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL,
+ 0x0000);
/* Priority Override: disable DA, SA and VTU priority
* override.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PRI_OVERRIDE, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE,
+ 0x0000);
+ if (err)
+ return err;
/* Port Ethertype: use the Ethertype DSA Ethertype
* value.
*/
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_ETH_TYPE, ETH_P_EDSA);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_ETH_TYPE,
+ ETH_P_EDSA);
+ if (err)
+ return err;
}
/* Tag Remap: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_TAG_REGMAP_0123, 0x3210);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_0123,
+ 0x3210);
+ if (err)
+ return err;
/* Tag Remap 2: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_TAG_REGMAP_4567, 0x7654);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_4567,
+ 0x7654);
+ if (err)
+ return err;
}
/* Rate Control: disable ingress rate limiting. */
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
mv88e6xxx_6320_family(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_RATE_CONTROL, 0x0001);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL,
+ 0x0001);
+ if (err)
+ return err;
} else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_RATE_CONTROL, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL,
+ 0x0000);
+ if (err)
+ return err;
}
/* Port Control 1: disable trunking, disable sending
* learning messages to this port.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
- 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, 0x0000);
+ if (err)
+ return err;
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(chip, port, 0);
- if (ret)
- return ret;
+ err = _mv88e6xxx_port_fid_set(chip, port, 0);
+ if (err)
+ return err;
- ret = _mv88e6xxx_port_based_vlan_map(chip, port);
- if (ret)
- return ret;
+ err = _mv88e6xxx_port_based_vlan_map(chip, port);
+ if (err)
+ return err;
/* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN,
- 0x0000);
- if (ret)
- return ret;
-
- return 0;
+ return mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, 0x0000);
}
-static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
{
int err;
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_01,
- (addr[0] << 8) | addr[1]);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
if (err)
return err;
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_23,
- (addr[2] << 8) | addr[3]);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
if (err)
return err;
- return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_45,
- (addr[4] << 8) | addr[5]);
+ return 0;
}
static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
@@ -2747,7 +2736,7 @@ static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
/* Round to nearest multiple of coeff */
age_time = (msecs + coeff / 2) / coeff;
- err = mv88e6xxx_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, &val);
+ err = mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, &val);
if (err)
return err;
@@ -2755,7 +2744,7 @@ static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
val &= ~0xff0;
val |= age_time << 4;
- return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, val);
+ return mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL, val);
}
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
@@ -2786,7 +2775,7 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE))
reg |= GLOBAL_CONTROL_PPU_ENABLE;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL, reg);
if (err)
return err;
@@ -2796,15 +2785,14 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL,
- reg);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_MONITOR_CONTROL, reg);
if (err)
return err;
/* Disable remote management, and set the switch's DSA device number. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2,
- GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
- (ds->index & 0x1f));
+ err = mv88e6xxx_g1_write(chip, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ (ds->index & 0x1f));
if (err)
return err;
@@ -2817,8 +2805,8 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
* enable address learn messages to be sent to all message
* ports.
*/
- err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_LEARN2ALL);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_LEARN2ALL);
if (err)
return err;
@@ -2832,39 +2820,39 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
return err;
/* Configure the IP ToS mapping registers. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_0, 0x0000);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_1, 0x0000);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_2, 0x5555);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_3, 0x5555);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_4, 0xaaaa);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_5, 0xaaaa);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_6, 0xffff);
if (err)
return err;
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IP_PRI_7, 0xffff);
if (err)
return err;
/* Configure the IEEE 802.1p priority mapping register. */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_IEEE_PRI, 0xfa41);
if (err)
return err;
/* Clear the statistics counters for all ports */
- err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_write(chip, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_FLUSH_ALL);
if (err)
return err;
@@ -2892,7 +2880,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
goto unlock;
/* Setup Switch Port Registers */
- for (i = 0; i < chip->info->num_ports; i++) {
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
err = mv88e6xxx_setup_port(chip, i);
if (err)
goto unlock;
@@ -2921,14 +2909,11 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
-
- /* Has an indirect Switch MAC/WoL/WoF register in Global 2? */
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_SWITCH_MAC))
- err = mv88e6xxx_g2_set_switch_mac(chip, addr);
- else
- err = mv88e6xxx_g1_set_switch_mac(chip, addr);
+ if (!chip->info->ops->set_switch_mac)
+ return -EOPNOTSUPP;
+ mutex_lock(&chip->reg_lock);
+ err = chip->info->ops->set_switch_mac(chip, addr);
mutex_unlock(&chip->reg_lock);
return err;
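Reviewer note: mv88e6xxx_set_addr() (and get_eeprom/set_eeprom just below) stop testing capability flags such as MV88E6XXX_FLAG_G2_SWITCH_MAC at the call site; instead they probe the per-chip info->ops function pointer and return -EOPNOTSUPP before reg_lock is ever taken. The per-model ops tables at the end of this file then select the Global 1, Global 2 or PPU-based implementation for each chip.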
@@ -2940,7 +2925,7 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
u16 val;
int err;
- if (phy >= chip->info->num_ports)
+ if (phy >= mv88e6xxx_num_ports(chip))
return 0xffff;
mutex_lock(&chip->reg_lock);
@@ -2955,7 +2940,7 @@ static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
struct mv88e6xxx_chip *chip = bus->priv;
int err;
- if (phy >= chip->info->num_ports)
+ if (phy >= mv88e6xxx_num_ports(chip))
return 0xffff;
mutex_lock(&chip->reg_lock);
@@ -3183,13 +3168,11 @@ static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16))
- err = mv88e6xxx_g2_get_eeprom16(chip, eeprom, data);
- else
- err = -EOPNOTSUPP;
+ if (!chip->info->ops->get_eeprom)
+ return -EOPNOTSUPP;
+ mutex_lock(&chip->reg_lock);
+ err = chip->info->ops->get_eeprom(chip, eeprom, data);
mutex_unlock(&chip->reg_lock);
if (err)
@@ -3206,21 +3189,133 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
+ if (!chip->info->ops->set_eeprom)
+ return -EOPNOTSUPP;
+
if (eeprom->magic != 0xc3ec4951)
return -EINVAL;
mutex_lock(&chip->reg_lock);
-
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16))
- err = mv88e6xxx_g2_set_eeprom16(chip, eeprom, data);
- else
- err = -EOPNOTSUPP;
-
+ err = chip->info->ops->set_eeprom(chip, eeprom, data);
mutex_unlock(&chip->reg_lock);
return err;
}
+static const struct mv88e6xxx_ops mv88e6085_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6095_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6123_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6131_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6161_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6165_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_read,
+ .phy_write = mv88e6xxx_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6171_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6172_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6175_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6176_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6185_ops = {
+ .set_switch_mac = mv88e6xxx_g1_set_switch_mac,
+ .phy_read = mv88e6xxx_phy_ppu_read,
+ .phy_write = mv88e6xxx_phy_ppu_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6240_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6320_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6321_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6350_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6351_ops = {
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
+static const struct mv88e6xxx_ops mv88e6352_ops = {
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+};
+
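The per-chip ops tables above make feature support a matter of which function pointers are populated: mv88e6xxx_set_addr(), get_eeprom() and set_eeprom() now test the pointer and return -EOPNOTSUPP instead of checking MV88E6XXX_FLAG_G2_SWITCH_MAC or MV88E6XXX_FLAGS_EEPROM16 capability bits. A minimal standalone sketch of the dispatch pattern, with illustrative types rather than the driver's:

#include <errno.h>
#include <stdio.h>

struct chip;

/* Hypothetical ops table: a NULL member means the chip lacks the feature. */
struct chip_ops {
	int (*set_switch_mac)(struct chip *chip, const unsigned char *addr);
};

struct chip {
	const struct chip_ops *ops;
};

static int chip_set_switch_mac(struct chip *chip, const unsigned char *addr)
{
	/* The feature test is a pointer check, not a capability flag. */
	if (!chip->ops->set_switch_mac)
		return -EOPNOTSUPP;
	return chip->ops->set_switch_mac(chip, addr);
}

int main(void)
{
	static const struct chip_ops bare_ops = { NULL };
	struct chip c = { &bare_ops };

	/* prints -95 (EOPNOTSUPP on Linux): no callback is wired up */
	printf("%d\n", chip_set_switch_mac(&c, (const unsigned char *)""));
	return 0;
}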
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
[MV88E6085] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
@@ -3229,8 +3324,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 10,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6097,
+ .ops = &mv88e6085_ops,
},
[MV88E6095] = {
@@ -3240,8 +3337,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 256,
.num_ports = 11,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6095,
+ .ops = &mv88e6095_ops,
},
[MV88E6123] = {
@@ -3251,8 +3350,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 3,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6123_ops,
},
[MV88E6131] = {
@@ -3262,8 +3363,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 256,
.num_ports = 8,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ .ops = &mv88e6131_ops,
},
[MV88E6161] = {
@@ -3273,8 +3376,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 6,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6161_ops,
},
[MV88E6165] = {
@@ -3284,8 +3389,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 6,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ .ops = &mv88e6165_ops,
},
[MV88E6171] = {
@@ -3295,8 +3402,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6171_ops,
},
[MV88E6172] = {
@@ -3306,8 +3415,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6172_ops,
},
[MV88E6175] = {
@@ -3317,8 +3428,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6175_ops,
},
[MV88E6176] = {
@@ -3328,8 +3441,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6176_ops,
},
[MV88E6185] = {
@@ -3339,8 +3454,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 256,
.num_ports = 10,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ .ops = &mv88e6185_ops,
},
[MV88E6240] = {
@@ -3350,8 +3467,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6240_ops,
},
[MV88E6320] = {
@@ -3361,8 +3480,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ .ops = &mv88e6320_ops,
},
[MV88E6321] = {
@@ -3372,8 +3493,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ .ops = &mv88e6321_ops,
},
[MV88E6350] = {
@@ -3383,8 +3506,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6350_ops,
},
[MV88E6351] = {
@@ -3394,8 +3519,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ .ops = &mv88e6351_ops,
},
[MV88E6352] = {
@@ -3405,8 +3532,10 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_ports = 7,
.port_base_addr = 0x10,
+ .global1_addr = 0x1b,
.age_time_coeff = 15000,
.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ .ops = &mv88e6352_ops,
},
};
@@ -3469,33 +3598,16 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
return chip;
}
-static const struct mv88e6xxx_ops mv88e6xxx_g2_smi_phy_ops = {
- .read = mv88e6xxx_g2_smi_phy_read,
- .write = mv88e6xxx_g2_smi_phy_write,
-};
-
-static const struct mv88e6xxx_ops mv88e6xxx_phy_ops = {
- .read = mv88e6xxx_read,
- .write = mv88e6xxx_write,
-};
-
static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip)
{
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SMI_PHY)) {
- chip->phy_ops = &mv88e6xxx_g2_smi_phy_ops;
- } else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) {
- chip->phy_ops = &mv88e6xxx_phy_ppu_ops;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
mv88e6xxx_ppu_state_init(chip);
- } else {
- chip->phy_ops = &mv88e6xxx_phy_ops;
- }
}
static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
{
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) {
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
mv88e6xxx_ppu_state_destroy(chip);
- }
}
static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
@@ -3648,6 +3760,7 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_bridge_join = mv88e6xxx_port_bridge_join,
.port_bridge_leave = mv88e6xxx_port_bridge_leave,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
@@ -3720,7 +3833,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (IS_ERR(chip->reset))
return PTR_ERR(chip->reset);
- if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16) &&
+ if (chip->info->ops->get_eeprom &&
!of_property_read_u32(np, "eeprom-length", &eeprom_len))
chip->eeprom_len = eeprom_len;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
new file mode 100644
index 000000000000..d358720b6c2d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -0,0 +1,34 @@
+/*
+ * Marvell 88E6xxx Switch Global (1) Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "mv88e6xxx.h"
+#include "global1.h"
+
+int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ int addr = chip->info->global1_addr;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ int addr = chip->info->global1_addr;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+{
+ return mv88e6xxx_wait(chip, chip->info->global1_addr, reg, mask);
+}
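mv88e6xxx_g1_read/write/wait resolve the Global1 SMI address from chip->info->global1_addr instead of the removed REG_GLOBAL constant. Every entry in the info table above sets global1_addr to the same 0x1b, so behavior is unchanged; the indirection presumably prepares for chips that map Global1 elsewhere. A hedged usage sketch, assuming the driver's headers (the example_ name is hypothetical):

static int example_read_atu_control(struct mv88e6xxx_chip *chip, u16 *val)
{
	/* resolves to mv88e6xxx_read(chip, 0x1b, GLOBAL_ATU_CONTROL, val)
	 * for every chip in the current table */
	return mv88e6xxx_g1_read(chip, GLOBAL_ATU_CONTROL, val);
}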
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
new file mode 100644
index 000000000000..62291e6fe3a3
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -0,0 +1,23 @@
+/*
+ * Marvell 88E6xxx Switch Global (1) Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_GLOBAL1_H
+#define _MV88E6XXX_GLOBAL1_H
+
+#include "mv88e6xxx.h"
+
+int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
+int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
+int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+
+#endif /* _MV88E6XXX_GLOBAL1_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 99ed028298ac..cf686e7506a9 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -14,6 +14,28 @@
#include "mv88e6xxx.h"
#include "global2.h"
+#define ADDR_GLOBAL2 0x1c
+
+static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, ADDR_GLOBAL2, reg, val);
+}
+
+static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, ADDR_GLOBAL2, reg, val);
+}
+
+static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
+{
+ return mv88e6xxx_update(chip, ADDR_GLOBAL2, reg, update);
+}
+
+static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+{
+ return mv88e6xxx_wait(chip, ADDR_GLOBAL2, reg, mask);
+}
+
/* Offset 0x06: Device Mapping Table register */
static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
@@ -21,7 +43,7 @@ static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
{
u16 val = (target << 8) | (port & 0xf);
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_DEVICE_MAPPING, val);
}
static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
@@ -52,13 +74,13 @@ static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
bool hask, u16 mask)
{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (num << 12) | (mask & port_mask);
if (hask)
val |= GLOBAL2_TRUNK_MASK_HASK;
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MASK, val);
}
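BIT(mv88e6xxx_num_ports(chip)) - 1 builds a mask with one bit per physical port, used here to clamp the trunk mask and mapping values to real ports. A standalone worked example for a 7-port chip:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int num_ports = 7;		/* e.g. the 88E6352 family */
	unsigned int port_mask = BIT(num_ports) - 1;

	printf("0x%02x\n", port_mask);		/* 0x7f: ports 0-6 */
	return 0;
}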
/* Offset 0x08: Trunk Mapping Table register */
@@ -66,15 +88,15 @@ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
u16 map)
{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_TRUNK_MAPPING, val);
}
static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
{
- const u16 port_mask = BIT(chip->info->num_ports) - 1;
+ const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
int i, err;
/* Clear all eight possible Trunk Mask vectors */
@@ -103,17 +125,17 @@ static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
int port, err;
/* Init all Ingress Rate Limit resources of all ports */
- for (port = 0; port < chip->info->num_ports; ++port) {
+ for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) {
/* XXX newer chips (like 88E6390) have different 2-bit ops */
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_OP_INIT_ALL |
- (port << 8));
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_IRL_CMD,
+ GLOBAL2_IRL_CMD_OP_INIT_ALL |
+ (port << 8));
if (err)
break;
/* Wait for the operation to complete */
- err = mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD,
- GLOBAL2_IRL_CMD_BUSY);
+ err = mv88e6xxx_g2_wait(chip, GLOBAL2_IRL_CMD,
+ GLOBAL2_IRL_CMD_BUSY);
if (err)
break;
}
@@ -128,7 +150,7 @@ static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
{
u16 val = (pointer << 8) | data;
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_SWITCH_MAC, val);
}
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
@@ -151,7 +173,7 @@ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
{
u16 val = (pointer << 8) | (data & 0x7);
- return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, val);
+ return mv88e6xxx_g2_update(chip, GLOBAL2_PRIO_OVERRIDE, val);
}
static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
@@ -174,16 +196,16 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD,
- GLOBAL2_EEPROM_CMD_BUSY |
- GLOBAL2_EEPROM_CMD_RUNNING);
+ return mv88e6xxx_g2_wait(chip, GLOBAL2_EEPROM_CMD,
+ GLOBAL2_EEPROM_CMD_BUSY |
+ GLOBAL2_EEPROM_CMD_RUNNING);
}
static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, cmd);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_CMD, cmd);
if (err)
return err;
@@ -204,7 +226,7 @@ static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+ return mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_DATA, data);
}
static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
@@ -217,7 +239,7 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_DATA, data);
if (err)
return err;
@@ -283,7 +305,7 @@ int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
int err;
/* Ensure the RO WriteEn bit is set */
- err = mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, &val);
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &val);
if (err)
return err;
@@ -346,15 +368,15 @@ int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_CMD,
- GLOBAL2_SMI_PHY_CMD_BUSY);
+ return mv88e6xxx_g2_wait(chip, GLOBAL2_SMI_PHY_CMD,
+ GLOBAL2_SMI_PHY_CMD_BUSY);
}
static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
{
int err;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_CMD, cmd);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_CMD, cmd);
if (err)
return err;
@@ -375,7 +397,7 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
if (err)
return err;
- return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_DATA, val);
+ return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
}
int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
@@ -388,7 +410,7 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
if (err)
return err;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_DATA, val);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
if (err)
return err;
@@ -404,8 +426,7 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
/* Consider the frames with reserved multicast destination
* addresses matching 01:80:c2:00:00:2x as MGMT.
*/
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_2X,
- 0xffff);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_2X, 0xffff);
if (err)
return err;
}
@@ -414,8 +435,7 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
/* Consider the frames with reserved multicast destination
* addresses matching 01:80:c2:00:00:0x as MGMT.
*/
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X,
- 0xffff);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_MGMT_EN_0X, 0xffff);
if (err)
return err;
}
@@ -429,7 +449,7 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) ||
mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X))
reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7;
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, reg);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_SWITCH_MGMT, reg);
if (err)
return err;
@@ -454,8 +474,8 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) {
/* Initialize Cross-chip Port VLAN Table to reset defaults */
- err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_PVT_ADDR,
- GLOBAL2_PVT_ADDR_OP_INIT_ONES);
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_PVT_ADDR,
+ GLOBAL2_PVT_ADDR_OP_INIT_ONES);
if (err)
return err;
}
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 52f3f5231f51..e572121c196e 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -37,7 +37,6 @@
#define ADDR_SERDES 0x0f
#define SERDES_PAGE_FIBER 0x01
-#define REG_PORT(p) (0x10 + (p))
#define PORT_STATUS 0x00
#define PORT_STATUS_PAUSE_EN BIT(15)
#define PORT_STATUS_MY_PAUSE BIT(14)
@@ -160,7 +159,6 @@
#define PORT_TAG_REGMAP_0123 0x18
#define PORT_TAG_REGMAP_4567 0x19
-#define REG_GLOBAL 0x1b
#define GLOBAL_STATUS 0x00
#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
/* Two bits for 6165, 6185 etc */
@@ -172,8 +170,8 @@
#define GLOBAL_MAC_01 0x01
#define GLOBAL_MAC_23 0x02
#define GLOBAL_MAC_45 0x03
-#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */
-#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */
+#define GLOBAL_ATU_FID 0x01
+#define GLOBAL_VTU_FID 0x02
#define GLOBAL_VTU_FID_MASK 0xfff
#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */
#define GLOBAL_VTU_SID_MASK 0x3f
@@ -278,7 +276,6 @@
#define GLOBAL_STATS_COUNTER_32 0x1e
#define GLOBAL_STATS_COUNTER_01 0x1f
-#define REG_GLOBAL2 0x1c
#define GLOBAL2_INT_SOURCE 0x00
#define GLOBAL2_INT_MASK 0x01
#define GLOBAL2_MGMT_EN_2X 0x02
@@ -411,6 +408,11 @@ enum mv88e6xxx_cap {
*/
MV88E6XXX_CAP_SERDES,
+ /* Switch Global (1) Registers.
+ */
+ MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */
+ MV88E6XXX_CAP_G1_VTU_FID, /* (0x02) VTU FID Register */
+
/* Switch Global 2 Registers.
* The device contains a second set of global 16-bit registers.
*/
@@ -421,12 +423,7 @@ enum mv88e6xxx_cap {
MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */
MV88E6XXX_CAP_G2_PVT_ADDR, /* (0x0b) Cross Chip Port VLAN Addr */
MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */
- MV88E6XXX_CAP_G2_SWITCH_MAC, /* (0x0d) Switch MAC/WoL/WoF */
MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
- MV88E6XXX_CAP_G2_EEPROM_CMD, /* (0x14) EEPROM Command */
- MV88E6XXX_CAP_G2_EEPROM_DATA, /* (0x15) EEPROM Data */
- MV88E6XXX_CAP_G2_SMI_PHY_CMD, /* (0x18) SMI PHY Command */
- MV88E6XXX_CAP_G2_SMI_PHY_DATA, /* (0x19) SMI PHY Data */
/* PHY Polling Unit.
* See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
@@ -453,41 +450,34 @@ enum mv88e6xxx_cap {
};
/* Bitmask of capabilities */
-#define MV88E6XXX_FLAG_EDSA BIT(MV88E6XXX_CAP_EDSA)
-#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE)
-
-#define MV88E6XXX_FLAG_SMI_CMD BIT(MV88E6XXX_CAP_SMI_CMD)
-#define MV88E6XXX_FLAG_SMI_DATA BIT(MV88E6XXX_CAP_SMI_DATA)
-
-#define MV88E6XXX_FLAG_PHY_PAGE BIT(MV88E6XXX_CAP_PHY_PAGE)
-
-#define MV88E6XXX_FLAG_SERDES BIT(MV88E6XXX_CAP_SERDES)
-
-#define MV88E6XXX_FLAG_GLOBAL2 BIT(MV88E6XXX_CAP_GLOBAL2)
-#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT(MV88E6XXX_CAP_G2_MGMT_EN_2X)
-#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT(MV88E6XXX_CAP_G2_MGMT_EN_0X)
-#define MV88E6XXX_FLAG_G2_IRL_CMD BIT(MV88E6XXX_CAP_G2_IRL_CMD)
-#define MV88E6XXX_FLAG_G2_IRL_DATA BIT(MV88E6XXX_CAP_G2_IRL_DATA)
-#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT(MV88E6XXX_CAP_G2_PVT_ADDR)
-#define MV88E6XXX_FLAG_G2_PVT_DATA BIT(MV88E6XXX_CAP_G2_PVT_DATA)
-#define MV88E6XXX_FLAG_G2_SWITCH_MAC BIT(MV88E6XXX_CAP_G2_SWITCH_MAC)
-#define MV88E6XXX_FLAG_G2_POT BIT(MV88E6XXX_CAP_G2_POT)
-#define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT(MV88E6XXX_CAP_G2_EEPROM_CMD)
-#define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT(MV88E6XXX_CAP_G2_EEPROM_DATA)
-#define MV88E6XXX_FLAG_G2_SMI_PHY_CMD BIT(MV88E6XXX_CAP_G2_SMI_PHY_CMD)
-#define MV88E6XXX_FLAG_G2_SMI_PHY_DATA BIT(MV88E6XXX_CAP_G2_SMI_PHY_DATA)
-
-#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU)
-#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE)
-#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU)
-#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP)
-#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT)
-#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU)
-
-/* EEPROM Programming via Global2 with 16-bit data */
-#define MV88E6XXX_FLAGS_EEPROM16 \
- (MV88E6XXX_FLAG_G2_EEPROM_CMD | \
- MV88E6XXX_FLAG_G2_EEPROM_DATA)
+#define MV88E6XXX_FLAG_EDSA BIT_ULL(MV88E6XXX_CAP_EDSA)
+#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE)
+
+#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD)
+#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA)
+
+#define MV88E6XXX_FLAG_PHY_PAGE BIT_ULL(MV88E6XXX_CAP_PHY_PAGE)
+
+#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
+
+#define MV88E6XXX_FLAG_G1_ATU_FID BIT_ULL(MV88E6XXX_CAP_G1_ATU_FID)
+#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID)
+
+#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
+#define MV88E6XXX_FLAG_G2_IRL_CMD BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD)
+#define MV88E6XXX_FLAG_G2_IRL_DATA BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA)
+#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT_ULL(MV88E6XXX_CAP_G2_PVT_ADDR)
+#define MV88E6XXX_FLAG_G2_PVT_DATA BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA)
+#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
+
+#define MV88E6XXX_FLAG_PPU BIT_ULL(MV88E6XXX_CAP_PPU)
+#define MV88E6XXX_FLAG_PPU_ACTIVE BIT_ULL(MV88E6XXX_CAP_PPU_ACTIVE)
+#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU)
+#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP)
+#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
+#define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU)
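The capability flags switch from BIT() to BIT_ULL() and the flags field below widens to unsigned long long because the mv88e6xxx_cap enum is approaching 32 entries; shifting a 32-bit 1 by 32 or more is undefined behavior. A standalone illustration, assuming a capability that eventually lands past bit 31:

#include <stdint.h>
#include <stdio.h>

/* 1u << 35 would be undefined; the ULL variant is valid up to bit 63. */
#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	uint64_t flag = BIT_ULL(35);

	printf("0x%llx\n", (unsigned long long)flag);	/* 0x800000000 */
	return 0;
}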
/* Ingress Rate Limit unit */
#define MV88E6XXX_FLAGS_IRL \
@@ -509,11 +499,6 @@ enum mv88e6xxx_cap {
(MV88E6XXX_FLAG_PHY_PAGE | \
MV88E6XXX_FLAG_SERDES)
-/* Indirect PHY access via Global2 SMI PHY registers */
-#define MV88E6XXX_FLAGS_SMI_PHY \
- (MV88E6XXX_FLAG_G2_SMI_PHY_CMD |\
- MV88E6XXX_FLAG_G2_SMI_PHY_DATA)
-
#define MV88E6XXX_FLAGS_FAMILY_6095 \
(MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
@@ -522,7 +507,9 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAGS_MULTI_CHIP)
#define MV88E6XXX_FLAGS_FAMILY_6097 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
+ (MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
@@ -534,10 +521,11 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6165 \
- (MV88E6XXX_FLAG_GLOBAL2 | \
+ (MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
@@ -559,24 +547,22 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
- MV88E6XXX_FLAGS_EEPROM16 | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT | \
- MV88E6XXX_FLAGS_SMI_PHY)
+ MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6351 \
(MV88E6XXX_FLAG_EDSA | \
+ MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
@@ -584,28 +570,28 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
- MV88E6XXX_FLAGS_PVT | \
- MV88E6XXX_FLAGS_SMI_PHY)
+ MV88E6XXX_FLAGS_PVT)
#define MV88E6XXX_FLAGS_FAMILY_6352 \
(MV88E6XXX_FLAG_EDSA | \
MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
- MV88E6XXX_FLAG_G2_SWITCH_MAC | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_PPU_ACTIVE | \
MV88E6XXX_FLAG_STU | \
MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
- MV88E6XXX_FLAGS_EEPROM16 | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT | \
- MV88E6XXX_FLAGS_SERDES | \
- MV88E6XXX_FLAGS_SMI_PHY)
+ MV88E6XXX_FLAGS_SERDES)
+
+struct mv88e6xxx_ops;
struct mv88e6xxx_info {
enum mv88e6xxx_family family;
@@ -614,8 +600,10 @@ struct mv88e6xxx_info {
unsigned int num_databases;
unsigned int num_ports;
unsigned int port_base_addr;
+ unsigned int global1_addr;
unsigned int age_time_coeff;
- unsigned long flags;
+ unsigned long long flags;
+ const struct mv88e6xxx_ops *ops;
};
struct mv88e6xxx_atu_entry {
@@ -626,18 +614,15 @@ struct mv88e6xxx_atu_entry {
u8 mac[ETH_ALEN];
};
-struct mv88e6xxx_vtu_stu_entry {
- /* VTU only */
+struct mv88e6xxx_vtu_entry {
u16 vid;
u16 fid;
-
- /* VTU and STU */
u8 sid;
bool valid;
u8 data[DSA_MAX_PORTS];
};
-struct mv88e6xxx_ops;
+struct mv88e6xxx_bus_ops;
struct mv88e6xxx_priv_port {
struct net_device *bridge_dev;
@@ -658,14 +643,14 @@ struct mv88e6xxx_chip {
/* The MII bus and the address on the bus that is used to
* communicate with the switch
*/
- const struct mv88e6xxx_ops *smi_ops;
+ const struct mv88e6xxx_bus_ops *smi_ops;
struct mii_bus *bus;
int sw_addr;
/* Handles automatic disabling and re-enabling of the PHY
* polling unit.
*/
- const struct mv88e6xxx_ops *phy_ops;
+ const struct mv88e6xxx_bus_ops *phy_ops;
struct mutex ppu_mutex;
int ppu_disabled;
struct work_struct ppu_work;
@@ -694,11 +679,25 @@ struct mv88e6xxx_chip {
struct mii_bus *mdio_bus;
};
-struct mv88e6xxx_ops {
+struct mv88e6xxx_bus_ops {
int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
};
+struct mv88e6xxx_ops {
+ int (*get_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+ int (*set_eeprom)(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
+ int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
+
+ int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 *val);
+ int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 val);
+};
+
enum stat_type {
BANK0,
BANK1,
@@ -718,6 +717,16 @@ static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
return (chip->info->flags & flags) == flags;
}
+static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_databases;
+}
+
+static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_ports;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 7f3f1781c202..b3df70d07ff6 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -256,7 +256,7 @@ static struct regmap_access_table qca8k_readable_table = {
.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};
-struct regmap_config qca8k_regmap_config = {
+static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
@@ -586,13 +586,6 @@ qca8k_setup(struct dsa_switch *ds)
}
static int
-qca8k_set_addr(struct dsa_switch *ds, u8 *addr)
-{
- /* The subsystem always calls this function so add an empty stub */
- return 0;
-}
-
-static int
qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
@@ -921,7 +914,6 @@ qca8k_get_tag_protocol(struct dsa_switch *ds)
static struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
- .set_addr = qca8k_set_addr,
.get_strings = qca8k_get_strings,
.phy_read = qca8k_phy_read,
.phy_write = qca8k_phy_write,
@@ -1040,19 +1032,7 @@ static struct mdio_driver qca8kmdio_driver = {
},
};
-static int __init
-qca8kmdio_driver_register(void)
-{
- return mdio_driver_register(&qca8kmdio_driver);
-}
-module_init(qca8kmdio_driver_register);
-
-static void __exit
-qca8kmdio_driver_unregister(void)
-{
- mdio_driver_unregister(&qca8kmdio_driver);
-}
-module_exit(qca8kmdio_driver_unregister);
+mdio_module_driver(qca8kmdio_driver);
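mdio_module_driver() generates exactly the module init/exit boilerplate deleted above. Paraphrased from include/linux/mdio.h (check the tree for the authoritative definition):

#define mdio_module_driver(_mdio_driver)			\
static int __init mdio_module_init(void)			\
{								\
	return mdio_driver_register(&_mdio_driver);		\
}								\
module_init(mdio_module_init);					\
static void __exit mdio_module_exit(void)			\
{								\
	mdio_driver_unregister(&_mdio_driver);			\
}								\
module_exit(mdio_module_exit)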
MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 8a8d05500894..8456337a237d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -237,6 +237,8 @@ enum xgene_enet_rm {
#define TCPHDR_LEN 6
#define IPHDR_POS 6
#define IPHDR_LEN 6
+#define MSS_POS 20
+#define MSS_LEN 2
#define EC_POS 22 /* Enable checksum */
#define EC_LEN 1
#define ET_POS 23 /* Enable TSO */
@@ -253,6 +255,11 @@ enum xgene_enet_rm {
#define LAST_BUFFER (0x7800ULL << BUFDATALEN_POS)
+#define TSO_MSS0_POS 0
+#define TSO_MSS0_LEN 14
+#define TSO_MSS1_POS 16
+#define TSO_MSS1_LEN 14
+
struct xgene_enet_raw_desc {
__le64 m0;
__le64 m1;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 522ba920bfc1..429f18fc5503 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -137,6 +137,7 @@ static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
struct xgene_enet_raw_desc *raw_desc)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
struct sk_buff *skb;
struct device *dev;
skb_frag_t *frag;
@@ -144,6 +145,7 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
u16 skb_index;
u8 status;
int i, ret = 0;
+ u8 mss_index;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
@@ -160,6 +162,13 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
DMA_TO_DEVICE);
}
+ if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
+ mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
+ spin_lock(&pdata->mss_lock);
+ pdata->mss_refcnt[mss_index]--;
+ spin_unlock(&pdata->mss_lock);
+ }
+
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
@@ -178,15 +187,53 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
return ret;
}
-static u64 xgene_enet_work_msg(struct sk_buff *skb)
+static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ bool mss_index_found = false;
+ int mss_index;
+ int i;
+
+ spin_lock(&pdata->mss_lock);
+
+ /* Reuse the slot if MSS matches */
+ for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+ if (pdata->mss[i] == mss) {
+ pdata->mss_refcnt[i]++;
+ mss_index = i;
+ mss_index_found = true;
+ }
+ }
+
+ /* Overwrite the slot with ref_count = 0 */
+ for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+ if (!pdata->mss_refcnt[i]) {
+ pdata->mss_refcnt[i]++;
+ pdata->mac_ops->set_mss(pdata, mss, i);
+ pdata->mss[i] = mss;
+ mss_index = i;
+ mss_index_found = true;
+ }
+ }
+
+ spin_unlock(&pdata->mss_lock);
+
+ /* No slots with ref_count = 0 available, return busy */
+ if (!mss_index_found)
+ return -EBUSY;
+
+ return mss_index;
+}
+
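xgene_enet_setup_mss() multiplexes at most NUM_MSS_REG (4) hardware MSS registers across all TSO flows: first reuse a slot whose value already matches, otherwise claim one whose refcount dropped to zero, otherwise report busy. A standalone sketch of the selection (locking omitted; the driver holds pdata->mss_lock across both passes, and a refcount guard is added here for the all-zero initial state):

#include <errno.h>
#include <stdio.h>

#define NUM_MSS_SLOTS 4			/* mirrors NUM_MSS_REG */

static unsigned int mss_val[NUM_MSS_SLOTS];
static unsigned int mss_ref[NUM_MSS_SLOTS];

static int get_mss_slot(unsigned int mss)
{
	int i;

	for (i = 0; i < NUM_MSS_SLOTS; i++)	/* pass 1: exact match */
		if (mss_ref[i] && mss_val[i] == mss) {
			mss_ref[i]++;
			return i;
		}
	for (i = 0; i < NUM_MSS_SLOTS; i++)	/* pass 2: free slot */
		if (!mss_ref[i]) {
			mss_val[i] = mss;
			mss_ref[i] = 1;
			return i;
		}
	return -EBUSY;			/* all four registers are pinned */
}

int main(void)
{
	/* prints "0 0 1": reuse, then a fresh slot for the new MSS */
	printf("%d %d %d\n", get_mss_slot(1448), get_mss_slot(1448),
	       get_mss_slot(1400));
	return 0;
}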
+static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
struct net_device *ndev = skb->dev;
struct iphdr *iph;
u8 l3hlen = 0, l4hlen = 0;
u8 ethhdr, proto = 0, csum_enable = 0;
- u64 hopinfo = 0;
u32 hdr_len, mss = 0;
u32 i, len, nr_frags;
+ int mss_index;
ethhdr = xgene_enet_hdr_len(skb->data);
@@ -226,7 +273,11 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
if (!mss || ((skb->len - hdr_len) <= mss))
goto out;
- hopinfo |= SET_BIT(ET);
+ mss_index = xgene_enet_setup_mss(ndev, mss);
+ if (unlikely(mss_index < 0))
+ return -EBUSY;
+
+ *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
}
} else if (iph->protocol == IPPROTO_UDP) {
l4hlen = UDP_HDR_SIZE;
@@ -234,15 +285,15 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
}
out:
l3hlen = ip_hdrlen(skb) >> 2;
- hopinfo |= SET_VAL(TCPHDR, l4hlen) |
- SET_VAL(IPHDR, l3hlen) |
- SET_VAL(ETHHDR, ethhdr) |
- SET_VAL(EC, csum_enable) |
- SET_VAL(IS, proto) |
- SET_BIT(IC) |
- SET_BIT(TYPE_ETH_WORK_MESSAGE);
-
- return hopinfo;
+ *hopinfo |= SET_VAL(TCPHDR, l4hlen) |
+ SET_VAL(IPHDR, l3hlen) |
+ SET_VAL(ETHHDR, ethhdr) |
+ SET_VAL(EC, csum_enable) |
+ SET_VAL(IS, proto) |
+ SET_BIT(IC) |
+ SET_BIT(TYPE_ETH_WORK_MESSAGE);
+
+ return 0;
}
static u16 xgene_enet_encode_len(u16 len)
@@ -282,20 +333,22 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
skb_frag_t *frag;
u16 tail = tx_ring->tail;
- u64 hopinfo;
+ u64 hopinfo = 0;
u32 len, hw_len;
u8 ll = 0, nv = 0, idx = 0;
bool split = false;
u32 size, offset, ell_bytes = 0;
u32 i, fidx, nr_frags, count = 1;
+ int ret;
raw_desc = &tx_ring->raw_desc[tail];
tail = (tail + 1) & (tx_ring->slots - 1);
memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
- hopinfo = xgene_enet_work_msg(skb);
- if (!hopinfo)
- return -EINVAL;
+ ret = xgene_enet_work_msg(skb, &hopinfo);
+ if (ret)
+ return ret;
+
raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
hopinfo);
@@ -435,6 +488,9 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
count = xgene_enet_setup_tx_desc(tx_ring, skb);
+ if (count == -EBUSY)
+ return NETDEV_TX_BUSY;
+
if (count <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
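Returning NETDEV_TX_BUSY tells the core that the skb was not consumed and the queue should be retried later, which is why the -EBUSY case (all MSS slots pinned) is split out before the generic failure path that frees the skb. A hedged sketch of the contract, in kernel context (example_xmit and setup_descriptors are illustrative names):

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int count = setup_descriptors(skb);	/* hypothetical helper */

	if (count == -EBUSY)
		return NETDEV_TX_BUSY;	/* skb kept alive, retried later */
	if (count <= 0) {
		dev_kfree_skb_any(skb);	/* unrecoverable: drop, report OK */
		return NETDEV_TX_OK;
	}
	return NETDEV_TX_OK;		/* descriptors queued, skb consumed */
}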
@@ -1669,7 +1725,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ndev->features |= NETIF_F_TSO;
- pdata->mss = XGENE_ENET_MSS;
+ spin_lock_init(&pdata->mss_lock);
}
ndev->hw_features = ndev->features;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 773537161171..0cda58f5a840 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -47,7 +47,7 @@
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
#define MAX_EXP_BUFFS 256
-#define XGENE_ENET_MSS 1448
+#define NUM_MSS_REG 4
#define XGENE_MIN_ENET_FRAME_SIZE 60
#define XGENE_MAX_ENET_IRQ 16
@@ -143,7 +143,7 @@ struct xgene_mac_ops {
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
- void (*set_mss)(struct xgene_enet_pdata *pdata);
+ void (*set_mss)(struct xgene_enet_pdata *pdata, u16 mss, u8 index);
void (*link_state)(struct work_struct *work);
};
@@ -212,7 +212,9 @@ struct xgene_enet_pdata {
u8 eth_bufnum;
u8 bp_bufnum;
u16 ring_num;
- u32 mss;
+ u32 mss[NUM_MSS_REG];
+ u32 mss_refcnt[NUM_MSS_REG];
+ spinlock_t mss_lock; /* protects mss[] and mss_refcnt[] */
u8 tx_delay;
u8 rx_delay;
bool mdio_driver;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 279ee27004f7..6475f383ba83 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -232,9 +232,22 @@ static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}
-static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
+ u16 mss, u8 index)
{
- xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+ u8 offset;
+ u32 data;
+
+ offset = (index < 2) ? 0 : 4;
+ xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);
+
+ if (!(index & 0x1))
+ data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
+ SET_VAL(TSO_MSS0, mss);
+ else
+ data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);
+
+ xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}
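Each 32-bit TSIF CSR carries two 14-bit MSS values: slots 0/1 live at XG_TSIF_MSS_REG0_ADDR, slots 2/3 at offset +4, even slots in bits 0-13 (TSO_MSS0) and odd slots in bits 16-29 (TSO_MSS1). A standalone sketch of the packing, equivalent to the SET_VAL masking above:

#include <stdint.h>
#include <stdio.h>

#define MSS_MASK 0x3fffu		/* 14-bit field */

static uint32_t pack_mss(uint32_t old, unsigned int index, uint16_t mss)
{
	if (!(index & 1))	/* even slot: write low field, keep high */
		return (old & (MSS_MASK << 16)) | (mss & MSS_MASK);
	/* odd slot: write high field, keep low */
	return ((uint32_t)(mss & MSS_MASK) << 16) | (old & MSS_MASK);
}

int main(void)
{
	/* slot 3 (odd, CSR at +4): prints 0x05a805a8 for old=0x5a8, mss=1448 */
	printf("0x%08x\n", pack_mss(0x000005a8, 3, 1448));
	return 0;
}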
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
@@ -258,7 +271,6 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
xgene_xgmac_set_mac_addr(pdata);
- xgene_xgmac_set_mss(pdata);
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 74f0a37c4eb6..17aa33c5567d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1486,7 +1486,7 @@ static int b44_open(struct net_device *dev)
b44_enable_ints(bp);
if (bp->flags & B44_FLAG_EXTERNAL_PHY)
- phy_start(bp->phydev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
out:
@@ -1651,7 +1651,7 @@ static int b44_close(struct net_device *dev)
netif_stop_queue(dev);
if (bp->flags & B44_FLAG_EXTERNAL_PHY)
- phy_stop(bp->phydev);
+ phy_stop(dev->phydev);
napi_disable(&bp->napi);
@@ -1832,90 +1832,100 @@ static int b44_nway_reset(struct net_device *dev)
return r;
}
-static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ u32 supported, advertising;
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
- return phy_ethtool_gset(bp->phydev, cmd);
+ BUG_ON(!dev->phydev);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
}
- cmd->supported = (SUPPORTED_Autoneg);
- cmd->supported |= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_MII);
+ supported = (SUPPORTED_Autoneg);
+ supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_MII);
- cmd->advertising = 0;
+ advertising = 0;
if (bp->flags & B44_FLAG_ADV_10HALF)
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if (bp->flags & B44_FLAG_ADV_10FULL)
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (bp->flags & B44_FLAG_ADV_100HALF)
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if (bp->flags & B44_FLAG_ADV_100FULL)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
- SPEED_100 : SPEED_10));
- cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+ advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = 0;
- cmd->phy_address = bp->phy_addr;
- cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
- XCVR_EXTERNAL : XCVR_INTERNAL;
- cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+ cmd->base.port = 0;
+ cmd->base.phy_address = bp->phy_addr;
+ cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
- if (cmd->autoneg == AUTONEG_ENABLE)
- cmd->advertising |= ADVERTISED_Autoneg;
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
+ advertising |= ADVERTISED_Autoneg;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
if (!netif_running(dev)){
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = 0xff;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0xff;
}
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+
return 0;
}
-static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct b44 *bp = netdev_priv(dev);
u32 speed;
int ret;
+ u32 advertising;
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
+ BUG_ON(!dev->phydev);
spin_lock_irq(&bp->lock);
if (netif_running(dev))
b44_setup_phy(bp);
- ret = phy_ethtool_sset(bp->phydev, cmd);
+ ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
spin_unlock_irq(&bp->lock);
return ret;
}
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* We do not support gigabit. */
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if (cmd->advertising &
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (advertising &
(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
return -EINVAL;
} else if ((speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)) {
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)) {
return -EINVAL;
}
spin_lock_irq(&bp->lock);
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
bp->flags &= ~(B44_FLAG_FORCE_LINK |
B44_FLAG_100_BASE_T |
B44_FLAG_FULL_DUPLEX |
@@ -1923,19 +1933,19 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
- if (cmd->advertising == 0) {
+ if (advertising == 0) {
bp->flags |= (B44_FLAG_ADV_10HALF |
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
} else {
- if (cmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
bp->flags |= B44_FLAG_ADV_10HALF;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
bp->flags |= B44_FLAG_ADV_10FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
bp->flags |= B44_FLAG_ADV_100HALF;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
bp->flags |= B44_FLAG_ADV_100FULL;
}
} else {
@@ -1943,7 +1953,7 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
if (speed == SPEED_100)
bp->flags |= B44_FLAG_100_BASE_T;
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
@@ -2110,8 +2120,6 @@ static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops b44_ethtool_ops = {
.get_drvinfo = b44_get_drvinfo,
- .get_settings = b44_get_settings,
- .set_settings = b44_set_settings,
.nway_reset = b44_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = b44_get_wol,
@@ -2125,6 +2133,8 @@ static const struct ethtool_ops b44_ethtool_ops = {
.get_strings = b44_get_strings,
.get_sset_count = b44_get_sset_count,
.get_ethtool_stats = b44_get_ethtool_stats,
+ .get_link_ksettings = b44_get_link_ksettings,
+ .set_link_ksettings = b44_set_link_ksettings,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2137,8 +2147,8 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
spin_lock_irq(&bp->lock);
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
- err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+ BUG_ON(!dev->phydev);
+ err = phy_mii_ioctl(dev->phydev, ifr, cmd);
} else {
err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
}
@@ -2206,7 +2216,7 @@ static const struct net_device_ops b44_netdev_ops = {
static void b44_adjust_link(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phydev;
+ struct phy_device *phydev = dev->phydev;
bool status_changed = 0;
BUG_ON(!phydev);
@@ -2303,7 +2313,6 @@ static int b44_register_phy_one(struct b44 *bp)
SUPPORTED_MII);
phydev->advertising = phydev->supported;
- bp->phydev = phydev;
bp->old_link = 0;
bp->phy_addr = phydev->mdio.addr;
@@ -2323,9 +2332,10 @@ err_out:
static void b44_unregister_phy_one(struct b44 *bp)
{
+ struct net_device *dev = bp->dev;
struct mii_bus *mii_bus = bp->mii_bus;
- phy_disconnect(bp->phydev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
}
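The legacy get_settings/set_settings pair packs link modes into a single u32, which cannot represent newer modes; ethtool_link_ksettings keeps them in long bitmaps. Drivers like b44 that still track modes as legacy masks convert at the boundary with the two helpers used above. A hedged kernel-context sketch (example_fill is an illustrative name), assuming <linux/ethtool.h>:

static void example_fill(struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_Autoneg | SUPPORTED_100baseT_Full;
	u32 advertising;

	/* legacy u32 -> link-mode bitmap */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	/* bitmap -> legacy u32; returns false if modes were lost */
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
}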
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 65d88d7c5581..89d2cf341163 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -404,7 +404,6 @@ struct b44 {
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
int old_link;
struct mii_if_info mii_if;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6c8bc5fadac7..ae364c74baf3 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -791,7 +791,7 @@ static void bcm_enet_adjust_phy_link(struct net_device *dev)
int status_changed;
priv = netdev_priv(dev);
- phydev = priv->phydev;
+ phydev = dev->phydev;
status_changed = 0;
if (priv->old_link != phydev->link) {
@@ -913,7 +913,6 @@ static int bcm_enet_open(struct net_device *dev)
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
- priv->phydev = phydev;
}
/* mask all interrupts and request them */
@@ -1085,7 +1084,7 @@ static int bcm_enet_open(struct net_device *dev)
ENETDMAC_IRMASK, priv->tx_chan);
if (priv->has_phy)
- phy_start(priv->phydev);
+ phy_start(phydev);
else
bcm_enet_adjust_link(dev);
@@ -1127,7 +1126,7 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(priv->phydev);
+ phy_disconnect(phydev);
return ret;
}
@@ -1190,7 +1189,7 @@ static int bcm_enet_stop(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
del_timer_sync(&priv->rx_timeout);
/* mask all interrupts */
@@ -1234,10 +1233,8 @@ static int bcm_enet_stop(struct net_device *dev)
free_irq(dev->irq, dev);
/* release phy */
- if (priv->has_phy) {
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
+ if (priv->has_phy)
+ phy_disconnect(dev->phydev);
return 0;
}
@@ -1437,64 +1434,68 @@ static int bcm_enet_nway_reset(struct net_device *dev)
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return genphy_restart_aneg(priv->phydev);
+ return genphy_restart_aneg(dev->phydev);
}
return -EOPNOTSUPP;
}
-static int bcm_enet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcm_enet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bcm_enet_priv *priv;
+ u32 supported, advertising;
priv = netdev_priv(dev);
- cmd->maxrxpkt = 0;
- cmd->maxtxpkt = 0;
-
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_gset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
} else {
- cmd->autoneg = 0;
- ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
- ? SPEED_100 : SPEED_10));
- cmd->duplex = (priv->force_duplex_full) ?
+ cmd->base.autoneg = 0;
+ cmd->base.speed = (priv->force_speed_100) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (priv->force_duplex_full) ?
DUPLEX_FULL : DUPLEX_HALF;
- cmd->supported = ADVERTISED_10baseT_Half |
+ supported = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- cmd->advertising = 0;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_EXTERNAL;
+ advertising = 0;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
+ cmd->base.port = PORT_MII;
}
return 0;
}
-static int bcm_enet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcm_enet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_sset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_set(dev->phydev, cmd);
} else {
- if (cmd->autoneg ||
- (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
- cmd->port != PORT_MII)
+ if (cmd->base.autoneg ||
+ (cmd->base.speed != SPEED_100 &&
+ cmd->base.speed != SPEED_10) ||
+ cmd->base.port != PORT_MII)
return -EINVAL;
- priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
- priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
+ priv->force_speed_100 =
+ (cmd->base.speed == SPEED_100) ? 1 : 0;
+ priv->force_duplex_full =
+ (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
if (netif_running(dev))
bcm_enet_adjust_link(dev);
@@ -1588,14 +1589,14 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = {
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.nway_reset = bcm_enet_nway_reset,
- .get_settings = bcm_enet_get_settings,
- .set_settings = bcm_enet_set_settings,
.get_drvinfo = bcm_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = bcm_enet_get_ringparam,
.set_ringparam = bcm_enet_set_ringparam,
.get_pauseparam = bcm_enet_get_pauseparam,
.set_pauseparam = bcm_enet_set_pauseparam,
+ .get_link_ksettings = bcm_enet_get_link_ksettings,
+ .set_link_ksettings = bcm_enet_set_link_ksettings,
};
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1604,9 +1605,9 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
} else {
struct mii_if_info mii;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index f55af4310085..0a1b7b2e55bd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -290,7 +290,6 @@ struct bcm_enet_priv {
/* used when a phy is connected (phylib used) */
struct mii_bus *mii_bus;
- struct phy_device *phydev;
int old_link;
int old_duplex;
int old_pause;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ecd357dbb1d4..27f11a5d5fe2 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
- rc = bnx2_request_firmware(bp);
- if (rc < 0)
- goto out;
-
netif_carrier_off(dev);
bnx2_disable_int(bp);
@@ -6428,7 +6424,6 @@ open_err:
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
- bnx2_release_firmware(bp);
goto out;
}
@@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto error;
+
+ bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
+ bnx2_release_firmware(bp);
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
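Requesting firmware in bnx2_open() surfaced a missing firmware file only on the first ifup; moving the request into bnx2_init_one() fails the PCI bind up front, lets the error path release what was loaded, and allows the chip reset added above to quiesce a device that may still be DMA-ing (e.g. after a crash-kernel boot). A generic sketch of the probe-time pattern, assuming <linux/firmware.h> (example_probe and the firmware name are illustrative):

static int example_probe(struct device *dev, const struct firmware **fw)
{
	int rc;

	rc = request_firmware(fw, "example/fw.bin", dev);
	if (rc < 0)
		return rc;	/* fail the bind early, not at ifup */

	/* ... on any later probe failure: release_firmware(*fw); */
	return 0;
}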
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e68fadecfdb..243cb9748d35 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -492,7 +492,8 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
-int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index dab61a81a3ba..20fe6a8c35c1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12563,43 +12563,64 @@ static int bnx2x_close(struct net_device *dev)
return 0;
}
-static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p)
+struct bnx2x_mcast_list_elem_group
{
- int mc_count = netdev_mc_count(bp->dev);
- struct bnx2x_mcast_list_elem *mc_mac =
- kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
- struct netdev_hw_addr *ha;
+ struct list_head mcast_group_link;
+ struct bnx2x_mcast_list_elem mcast_elems[];
+};
- if (!mc_mac) {
- BNX2X_ERR("Failed to allocate mc MAC list\n");
- return -ENOMEM;
+#define MCAST_ELEMS_PER_PG \
+ ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
+ sizeof(struct bnx2x_mcast_list_elem))
+
+static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_list_elem_group *current_mcast_group;
+
+ while (!list_empty(mcast_group_list)) {
+ current_mcast_group = list_first_entry(mcast_group_list,
+ struct bnx2x_mcast_list_elem_group,
+ mcast_group_link);
+ list_del(&current_mcast_group->mcast_group_link);
+ free_page((unsigned long)current_mcast_group);
}
+}
- INIT_LIST_HEAD(&p->mcast_list);
+static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_list_elem *mc_mac;
+ struct netdev_hw_addr *ha;
+ struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
+ int mc_count = netdev_mc_count(bp->dev);
+ int offset = 0;
+ INIT_LIST_HEAD(&p->mcast_list);
netdev_for_each_mc_addr(ha, bp->dev) {
+ if (!offset) {
+ current_mcast_group =
+ (struct bnx2x_mcast_list_elem_group *)
+ __get_free_page(GFP_ATOMIC);
+ if (!current_mcast_group) {
+ bnx2x_free_mcast_macs_list(mcast_group_list);
+ BNX2X_ERR("Failed to allocate mc MAC list\n");
+ return -ENOMEM;
+ }
+ list_add(&current_mcast_group->mcast_group_link,
+ mcast_group_list);
+ }
+ mc_mac = &current_mcast_group->mcast_elems[offset];
mc_mac->mac = bnx2x_mc_addr(ha);
list_add_tail(&mc_mac->link, &p->mcast_list);
- mc_mac++;
+ offset++;
+ if (offset == MCAST_ELEMS_PER_PG)
+ offset = 0;
}
-
p->mcast_list_len = mc_count;
-
return 0;
}
-static void bnx2x_free_mcast_macs_list(
- struct bnx2x_mcast_ramrod_params *p)
-{
- struct bnx2x_mcast_list_elem *mc_mac =
- list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
- link);
-
- WARN_ON(!mc_mac);
- kfree(mc_mac);
-}
-
/**
* bnx2x_set_uc_list - configure a new unicast MACs list.
*
@@ -12647,6 +12668,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
{
+ LIST_HEAD(mcast_group_list);
struct net_device *dev = bp->dev;
struct bnx2x_mcast_ramrod_params rparam = {NULL};
int rc = 0;
@@ -12662,7 +12684,7 @@ static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
/* then, configure a new MACs list */
if (netdev_mc_count(dev)) {
- rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
if (rc)
return rc;
@@ -12673,7 +12695,7 @@ static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
rc);
- bnx2x_free_mcast_macs_list(&rparam);
+ bnx2x_free_mcast_macs_list(&mcast_group_list);
}
return rc;
@@ -12681,6 +12703,7 @@ static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
static int bnx2x_set_mc_list(struct bnx2x *bp)
{
+ LIST_HEAD(mcast_group_list);
struct bnx2x_mcast_ramrod_params rparam = {NULL};
struct net_device *dev = bp->dev;
int rc = 0;
@@ -12692,7 +12715,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
rparam.mcast_obj = &bp->mcast_obj;
if (netdev_mc_count(dev)) {
- rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
if (rc)
return rc;
@@ -12703,7 +12726,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
rc);
- bnx2x_free_mcast_macs_list(&rparam);
+ bnx2x_free_mcast_macs_list(&mcast_group_list);
} else {
/* If no mc addresses are required, flush the configuration */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
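Both bnx2x_main.c above and bnx2x_sp.c below switch from one large GFP_ATOMIC kcalloc() to carving elements out of individual pages, each page fronted by a list_head so the pages can be chained and later freed one at a time. A self-contained sketch of the pattern (illustrative names; my_elem stands in for bnx2x_mcast_list_elem):

struct my_elem {
        struct list_head link;
        u8 mac[ETH_ALEN];
};

struct elem_group {
        struct list_head group_link;    /* chains whole pages together */
        struct my_elem elems[];         /* rest of the page holds elements */
};

#define ELEMS_PER_PG \
        ((PAGE_SIZE - sizeof(struct elem_group)) / sizeof(struct my_elem))

/* return the idx-th element, allocating a fresh page at each boundary */
static struct my_elem *pool_get(struct list_head *pages, int idx)
{
        struct elem_group *g;

        if (!(idx % ELEMS_PER_PG)) {
                g = (struct elem_group *)__get_free_page(GFP_ATOMIC);
                if (!g)
                        return NULL;    /* caller frees pages already chained */
                list_add_tail(&g->group_link, pages);
        }
        g = list_last_entry(pages, struct elem_group, group_link);
        return &g->elems[idx % ELEMS_PER_PG];
}

Freeing walks the page list with list_first_entry()/free_page(), exactly as bnx2x_free_mcast_macs_list() does above.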
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index d468380c2a23..cea6bdcde33f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2606,8 +2606,23 @@ struct bnx2x_mcast_bin_elem {
int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */
};
+union bnx2x_mcast_elem {
+ struct bnx2x_mcast_bin_elem bin_elem;
+ struct bnx2x_mcast_mac_elem mac_elem;
+};
+
+struct bnx2x_mcast_elem_group {
+ struct list_head mcast_group_link;
+ union bnx2x_mcast_elem mcast_elems[];
+};
+
+#define MCAST_MAC_ELEMS_PER_PG \
+ ((PAGE_SIZE - sizeof(struct bnx2x_mcast_elem_group)) / \
+ sizeof(union bnx2x_mcast_elem))
+
struct bnx2x_pending_mcast_cmd {
struct list_head link;
+ struct list_head group_head;
int type; /* BNX2X_MCAST_CMD_X */
union {
struct list_head macs_head;
@@ -2638,16 +2653,29 @@ static int bnx2x_mcast_wait(struct bnx2x *bp,
return 0;
}
+static void bnx2x_free_groups(struct list_head *mcast_group_list)
+{
+ struct bnx2x_mcast_elem_group *current_mcast_group;
+
+ while (!list_empty(mcast_group_list)) {
+ current_mcast_group = list_first_entry(mcast_group_list,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
+ list_del(&current_mcast_group->mcast_group_link);
+ free_page((unsigned long)current_mcast_group);
+ }
+}
+
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
struct bnx2x_mcast_obj *o,
struct bnx2x_mcast_ramrod_params *p,
enum bnx2x_mcast_cmd cmd)
{
- int total_sz;
struct bnx2x_pending_mcast_cmd *new_cmd;
- struct bnx2x_mcast_mac_elem *cur_mac = NULL;
struct bnx2x_mcast_list_elem *pos;
- int macs_list_len = 0, macs_list_len_size;
+ struct bnx2x_mcast_elem_group *elem_group;
+ struct bnx2x_mcast_mac_elem *mac_elem;
+ int total_elems = 0, macs_list_len = 0, offset = 0;
/* When adding MACs we'll need to store their values */
if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET)
@@ -2657,50 +2685,61 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
if (!p->mcast_list_len)
return 0;
- /* For a set command, we need to allocate sufficient memory for all
- * the bins, since we can't analyze at this point how much memory would
- * be required.
- */
- macs_list_len_size = macs_list_len *
- sizeof(struct bnx2x_mcast_mac_elem);
- if (cmd == BNX2X_MCAST_CMD_SET) {
- int bin_size = BNX2X_MCAST_BINS_NUM *
- sizeof(struct bnx2x_mcast_bin_elem);
-
- if (bin_size > macs_list_len_size)
- macs_list_len_size = bin_size;
- }
- total_sz = sizeof(*new_cmd) + macs_list_len_size;
-
/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
- new_cmd = kzalloc(total_sz, GFP_ATOMIC);
-
+ new_cmd = kzalloc(sizeof(*new_cmd), GFP_ATOMIC);
if (!new_cmd)
return -ENOMEM;
- DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
- cmd, macs_list_len);
-
INIT_LIST_HEAD(&new_cmd->data.macs_head);
-
+ INIT_LIST_HEAD(&new_cmd->group_head);
new_cmd->type = cmd;
new_cmd->done = false;
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
+ cmd, macs_list_len);
+
switch (cmd) {
case BNX2X_MCAST_CMD_ADD:
case BNX2X_MCAST_CMD_SET:
- cur_mac = (struct bnx2x_mcast_mac_elem *)
- ((u8 *)new_cmd + sizeof(*new_cmd));
-
- /* Push the MACs of the current command into the pending command
- * MACs list: FIFO
+ /* For a set command, we need to allocate sufficient memory for
+ * all the bins, since we can't analyze at this point how much
+ * memory would be required.
*/
+ total_elems = macs_list_len;
+ if (cmd == BNX2X_MCAST_CMD_SET) {
+ if (total_elems < BNX2X_MCAST_BINS_NUM)
+ total_elems = BNX2X_MCAST_BINS_NUM;
+ }
+ while (total_elems > 0) {
+ elem_group = (struct bnx2x_mcast_elem_group *)
+ __get_free_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!elem_group) {
+ bnx2x_free_groups(&new_cmd->group_head);
+ kfree(new_cmd);
+ return -ENOMEM;
+ }
+ total_elems -= MCAST_MAC_ELEMS_PER_PG;
+ list_add_tail(&elem_group->mcast_group_link,
+ &new_cmd->group_head);
+ }
+ elem_group = list_first_entry(&new_cmd->group_head,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
list_for_each_entry(pos, &p->mcast_list, link) {
- memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
- list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
- cur_mac++;
+ mac_elem = &elem_group->mcast_elems[offset].mac_elem;
+ memcpy(mac_elem->mac, pos->mac, ETH_ALEN);
+ /* Push the MACs of the current command into the pending
+ * command MACs list: FIFO
+ */
+ list_add_tail(&mac_elem->link,
+ &new_cmd->data.macs_head);
+ offset++;
+ if (offset == MCAST_MAC_ELEMS_PER_PG) {
+ offset = 0;
+ elem_group = list_next_entry(elem_group,
+ mcast_group_link);
+ }
}
-
break;
case BNX2X_MCAST_CMD_DEL:
@@ -2978,7 +3017,8 @@ bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp,
u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ];
struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
struct bnx2x_mcast_bin_elem *p_item;
- int i, cnt = 0, mac_cnt = 0;
+ struct bnx2x_mcast_elem_group *elem_group;
+ int cnt = 0, mac_cnt = 0, offset = 0, i;
memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ);
memcpy(cur, o->registry.aprox_match.vec,
@@ -3001,9 +3041,10 @@ bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp,
* a list that will be used to configure bins.
*/
cmd_pos->set_convert = true;
- p_item = (struct bnx2x_mcast_bin_elem *)(cmd_pos + 1);
INIT_LIST_HEAD(&cmd_pos->data.macs_head);
-
+ elem_group = list_first_entry(&cmd_pos->group_head,
+ struct bnx2x_mcast_elem_group,
+ mcast_group_link);
for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) {
bool b_current = !!BIT_VEC64_TEST_BIT(cur, i);
bool b_required = !!BIT_VEC64_TEST_BIT(req, i);
@@ -3011,12 +3052,18 @@ bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp,
if (b_current == b_required)
continue;
+ p_item = &elem_group->mcast_elems[offset].bin_elem;
p_item->bin = i;
p_item->type = b_required ? BNX2X_MCAST_CMD_SET_ADD
: BNX2X_MCAST_CMD_SET_DEL;
list_add_tail(&p_item->link , &cmd_pos->data.macs_head);
- p_item++;
cnt++;
+ offset++;
+ if (offset == MCAST_MAC_ELEMS_PER_PG) {
+ offset = 0;
+ elem_group = list_next_entry(elem_group,
+ mcast_group_link);
+ }
}
/* We now definitely know how many commands are hiding here.
@@ -3103,6 +3150,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
*/
if (cmd_pos->done) {
list_del(&cmd_pos->link);
+ bnx2x_free_groups(&cmd_pos->group_head);
kfree(cmd_pos);
}
@@ -3741,6 +3789,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
}
list_del(&cmd_pos->link);
+ bnx2x_free_groups(&cmd_pos->group_head);
kfree(cmd_pos);
return cnt;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6c586b045d1d..3f77d0863543 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2521,7 +2521,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
for_each_vf(bp, vfidx) {
bulletin = BP_VF_BULLETIN(bp, vfidx);
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
- bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
+ htons(ETH_P_8021Q));
}
}
@@ -2781,7 +2782,8 @@ static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
return 0;
}
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct pf_vf_bulletin_content *bulletin = NULL;
struct bnx2x *bp = netdev_priv(dev);
@@ -2796,6 +2798,9 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
return -EINVAL;
}
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
vfidx, vlan, 0);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 228c964e709a..a9f9f3738022 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -32,6 +32,7 @@
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -93,50 +94,49 @@ enum board_idx {
BCM57404_NPAR,
BCM57406_NPAR,
BCM57407_SFP,
+ BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
- BCM57304_VF,
- BCM57404_VF,
- BCM57414_VF,
- BCM57314_VF,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
};
/* indexed by enum above */
static const struct {
char *name;
} board_info[] = {
- { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
- { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
+ { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" },
- { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
- { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
- { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
+ { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
+ { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" },
- { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
- { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
+ { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
- { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
@@ -160,13 +160,19 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
- { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
- { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
- { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
- { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
{ 0 }
};
@@ -189,8 +195,7 @@ static const u16 bnxt_async_events_arr[] = {
static bool bnxt_vf_pciid(enum board_idx idx)
{
- return (idx == BCM57304_VF || idx == BCM57404_VF ||
- idx == BCM57314_VF || idx == BCM57414_VF);
+ return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -3419,10 +3424,10 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
- vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
- BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
- BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
- BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+ vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
req.hash_type = cpu_to_le32(vnic->hash_type);
@@ -4156,6 +4161,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
+ bp->tx_push_thresh = 0;
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -4187,12 +4197,6 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
- memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
- if (is_valid_ether_addr(vf->mac_addr))
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
- else
- random_ether_addr(bp->dev->dev_addr);
vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -4204,14 +4208,21 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
vf->max_vnics = le16_to_cpu(resp->max_vnics);
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
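+ /* bnxt_approve_mac() below sends its own HWRM message, so the
+ * command lock must be dropped first
+ */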
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ /* overwrite netdev dev_addr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ random_ether_addr(bp->dev->dev_addr);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
+ return rc;
#endif
}
- bp->tx_push_thresh = 0;
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
- bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
-
hwrm_func_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4249,6 +4260,9 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bp->max_tc = 1;
+
qptr = &resp->queue_id0;
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
@@ -4307,6 +4321,31 @@ hwrm_ver_get_exit:
return rc;
}
+int bnxt_hwrm_fw_set_time(struct bnxt *bp)
+{
+#if IS_ENABLED(CONFIG_RTC_LIB)
+ struct hwrm_fw_set_time_input req = {0};
+ struct rtc_time tm;
+ struct timeval tv;
+
+ if (bp->hwrm_spec_code < 0x10400)
+ return -EOPNOTSUPP;
+
+ do_gettimeofday(&tv);
+ rtc_time_to_tm(tv.tv_sec, &tm);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
+ req.year = cpu_to_le16(1900 + tm.tm_year);
+ req.month = 1 + tm.tm_mon;
+ req.day = tm.tm_mday;
+ req.hour = tm.tm_hour;
+ req.minute = tm.tm_min;
+ req.second = tm.tm_sec;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
int rc;
@@ -6804,6 +6843,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err;
+ bnxt_hwrm_fw_set_time(bp);
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 23e04a6142fb..51b164a0e844 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.3.0"
+#define DRV_MODULE_VERSION "1.5.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 3
+#define DRV_VER_MIN 5
#define DRV_VER_UPD 0
struct tx_bd {
@@ -106,11 +106,11 @@ struct tx_cmp {
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
#define CMP_TYPE_ERROR_STATUS 48
- #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
- #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
- #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
- #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
- #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define TX_CMP_FLAGS_ERROR (1 << 6)
#define TX_CMP_FLAGS_PUSH (1 << 7)
@@ -389,11 +389,6 @@ struct rx_tpa_end_cmp_ext {
#define INVALID_HW_RING_ID ((u16)-1)
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08
-
/* The hardware supports certain page sizes. Use the supported page sizes
* to allocate the rings.
*/
@@ -418,7 +413,7 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
-#define BNXT_MIN_PKT_SIZE 45
+#define BNXT_MIN_PKT_SIZE 52
#define BNXT_NUM_TESTS(bp) 0
@@ -1225,6 +1220,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b83e17403d6c..a7e04ff4eaed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -21,6 +21,8 @@
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
+#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
@@ -346,7 +348,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_rx_rings, max_tx_rings, tcs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
- channel->max_combined = max_rx_rings;
+ channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
max_rx_rings = 0;
@@ -404,8 +406,8 @@ static int bnxt_set_channels(struct net_device *dev,
if (tcs > 1)
max_tx_rings /= tcs;
- if (sh && (channel->combined_count > max_rx_rings ||
- channel->combined_count > max_tx_rings))
+ if (sh &&
+ channel->combined_count > max_t(int, max_rx_rings, max_tx_rings))
return -ENOMEM;
if (!sh && (channel->rx_count > max_rx_rings ||
@@ -428,8 +430,10 @@ static int bnxt_set_channels(struct net_device *dev,
if (sh) {
bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->rx_nr_rings = channel->combined_count;
- bp->tx_nr_rings_per_tc = channel->combined_count;
+ bp->rx_nr_rings = min_t(int, channel->combined_count,
+ max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count,
+ max_tx_rings);
} else {
bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
bp->rx_nr_rings = channel->rx_count;
@@ -1028,6 +1032,10 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+
static int bnxt_flash_nvram(struct net_device *dev,
u16 dir_type,
u16 dir_ordinal,
@@ -1179,7 +1187,6 @@ static int bnxt_flash_firmware(struct net_device *dev,
(unsigned long)calculated_crc);
return -EINVAL;
}
- /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw_data, fw_size);
if (rc == 0) /* Firmware update successful */
@@ -1188,6 +1195,57 @@ static int bnxt_flash_firmware(struct net_device *dev,
return rc;
}
+static int bnxt_flash_microcode(struct net_device *dev,
+ u16 dir_type,
+ const u8 *fw_data,
+ size_t fw_size)
+{
+ struct bnxt_ucode_trailer *trailer;
+ u32 calculated_crc;
+ u32 stored_crc;
+ int rc = 0;
+
+ if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
+ netdev_err(dev, "Invalid microcode file size: %u\n",
+ (unsigned int)fw_size);
+ return -EINVAL;
+ }
+ trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
+ sizeof(*trailer)));
+ if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
+ netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
+ le32_to_cpu(trailer->sig));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(trailer->dir_type) != dir_type) {
+ netdev_err(dev, "Expected microcode type: %d, read: %d\n",
+ dir_type, le16_to_cpu(trailer->dir_type));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(trailer->trailer_length) <
+ sizeof(struct bnxt_ucode_trailer)) {
+ netdev_err(dev, "Invalid microcode trailer length: %d\n",
+ le16_to_cpu(trailer->trailer_length));
+ return -EINVAL;
+ }
+
+ /* Confirm the CRC32 checksum of the file: */
+ stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+ sizeof(stored_crc)));
+ calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+ if (calculated_crc != stored_crc) {
+ netdev_err(dev,
+ "CRC32 (%08lX) does not match calculated: %08lX\n",
+ (unsigned long)stored_crc,
+ (unsigned long)calculated_crc);
+ return -EINVAL;
+ }
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw_data, fw_size);
+
+ return rc;
+}
+
static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
switch (dir_type) {
@@ -1206,7 +1264,7 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
return false;
}
-static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
switch (dir_type) {
case BNX_DIR_TYPE_AVS:
@@ -1227,7 +1285,7 @@ static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
static bool bnxt_dir_type_is_executable(u16 dir_type)
{
return bnxt_dir_type_is_ape_bin_format(dir_type) ||
- bnxt_dir_type_is_unprotected_exec_format(dir_type);
+ bnxt_dir_type_is_other_exec_format(dir_type);
}
static int bnxt_flash_firmware_from_file(struct net_device *dev,
@@ -1237,10 +1295,6 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
const struct firmware *fw;
int rc;
- if (dir_type != BNX_DIR_TYPE_UPDATE &&
- bnxt_dir_type_is_executable(dir_type) == false)
- return -EINVAL;
-
rc = request_firmware(&fw, filename, &dev->dev);
if (rc != 0) {
netdev_err(dev, "Error %d requesting firmware file: %s\n",
@@ -1249,6 +1303,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
}
if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+ else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw->data, fw->size);
@@ -1257,10 +1313,83 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
}
static int bnxt_flash_package_from_file(struct net_device *dev,
- char *filename)
+ char *filename, u32 install_type)
{
- netdev_err(dev, "packages are not yet supported\n");
- return -EINVAL;
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_nvm_install_update_input install = {0};
+ const struct firmware *fw;
+ u32 item_len;
+ u16 index;
+ int rc;
+
+ bnxt_hwrm_fw_set_time(bp);
+
+ if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, &item_len, NULL) != 0) {
+ netdev_err(dev, "PKG update area not created in nvram\n");
+ return -ENOBUFS;
+ }
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "PKG error %d requesting file: %s\n",
+ rc, filename);
+ return rc;
+ }
+
+ if (fw->size > item_len) {
+ netdev_err(dev, "PKG insufficient update area in nvram: %lu",
+ (unsigned long)fw->size);
+ rc = -EFBIG;
+ } else {
+ dma_addr_t dma_handle;
+ u8 *kmem;
+ struct hwrm_nvm_modify_input modify = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+
+ modify.dir_idx = cpu_to_le16(index);
+ modify.len = cpu_to_le32(fw->size);
+
+ kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
+ &dma_handle, GFP_KERNEL);
+ if (!kmem) {
+ netdev_err(dev,
+ "dma_alloc_coherent failure, length = %u\n",
+ (unsigned int)fw->size);
+ rc = -ENOMEM;
+ } else {
+ memcpy(kmem, fw->data, fw->size);
+ modify.host_src_addr = cpu_to_le64(dma_handle);
+
+ rc = hwrm_send_message(bp, &modify, sizeof(modify),
+ FLASH_PACKAGE_TIMEOUT);
+ dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
+ dma_handle);
+ }
+ }
+ release_firmware(fw);
+ if (rc)
+ return rc;
+
+ if ((install_type & 0xffff) == 0)
+ install_type >>= 16;
+ bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
+ install.install_type = cpu_to_le32(install_type);
+
+ rc = hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc)
+ return -EOPNOTSUPP;
+
+ if (resp->result) {
+ netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
+ (s8)resp->result, (int)resp->problem_item);
+ return -ENOPKG;
+ }
+ return 0;
}
static int bnxt_flash_device(struct net_device *dev,
@@ -1271,8 +1400,10 @@ static int bnxt_flash_device(struct net_device *dev,
return -EINVAL;
}
- if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
- return bnxt_flash_package_from_file(dev, flash->data);
+ if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
+ flash->region > 0xffff)
+ return bnxt_flash_package_from_file(dev, flash->data,
+ flash->region);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
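A sketch (not from the patch) of how flash->region doubles as the HWRM install_type in bnxt_flash_package_from_file() above: ETHTOOL_FLASH_ALL_REGIONS is 0 and passes through unchanged, while a value carrying only the high 16 bits is shifted down before being sent to firmware:

static u32 region_to_install_type(u32 region)
{
        if ((region & 0xffff) == 0)     /* e.g. 0x00010000 -> 0x1 */
                region >>= 16;
        return region;                  /* 0x00010002 is sent as-is */
}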
@@ -1516,7 +1647,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
/* Create or re-write an NVM item: */
if (bnxt_dir_type_is_executable(type) == true)
- return -EINVAL;
+ return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
attr = eeprom->offset & 0xffff;
@@ -1718,6 +1849,25 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
return rc;
}
+static int bnxt_nway_reset(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ int rc = 0;
+
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, true, false);
+
+ return rc;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
@@ -1750,4 +1900,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
+ .nway_reset = bnxt_nway_reset
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index 82bf44ab811b..cad30ddc6936 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -11,6 +11,7 @@
#define __BNXT_FW_HDR_H__
#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
+#define BNXT_UCODE_TRAILER_SIGNATURE 0x726c7254 /* "Trlr" */
enum SUPPORTED_FAMILY {
DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
@@ -85,7 +86,7 @@ enum SUPPORTED_MEDIA {
struct bnxt_fw_header {
__le32 signature; /* contains the constant value of
- * BNXT_Firmware_Bin_Signatures
+ * BNXT_FIRMWARE_BIN_SIGNATURE
*/
u8 flags; /* reserved for ChiMP use */
u8 code_type; /* enum SUPPORTED_CODE */
@@ -102,4 +103,17 @@ struct bnxt_fw_header {
u8 major_ver;
};
+/* Microcode and pre-boot software/firmware trailer: */
+struct bnxt_ucode_trailer {
+ u8 rsa_sig[256];
+ __le16 flags;
+ u8 version_format;
+ u8 version_length;
+ u8 version[16];
+ __le16 dir_type;
+ __le16 trailer_length;
+ __le32 sig; /* BNXT_UCODE_TRAILER_SIGNATURE */
+ __le32 chksum; /* CRC-32 */
+};
+
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 517567f6d651..04a96cc3498a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -39,7 +39,7 @@ struct eject_cmpl {
__le16 type;
#define EJECT_CMPL_TYPE_MASK 0x3fUL
#define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0)
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
__le16 len;
__le32 opaque;
__le32 v;
@@ -52,7 +52,7 @@ struct hwrm_cmpl {
__le16 type;
#define HWRM_CMPL_TYPE_MASK 0x3fUL
#define HWRM_CMPL_TYPE_SFT 0
- #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0)
+ #define HWRM_CMPL_TYPE_HWRM_DONE 0x20UL
__le16 sequence_id;
__le32 unused_1;
__le32 v;
@@ -65,7 +65,7 @@ struct hwrm_fwd_req_cmpl {
__le16 req_len_type;
#define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
#define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
- #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
#define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
#define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
__le16 source_id;
@@ -81,7 +81,7 @@ struct hwrm_fwd_resp_cmpl {
__le16 type;
#define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
#define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
- #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
__le16 source_id;
__le16 resp_len;
__le16 unused_1;
@@ -96,25 +96,26 @@ struct hwrm_async_event_cmpl {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
@@ -130,9 +131,9 @@ struct hwrm_async_event_cmpl_link_status_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
@@ -156,9 +157,9 @@ struct hwrm_async_event_cmpl_link_mtu_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
@@ -176,9 +177,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
@@ -200,8 +201,7 @@ struct hwrm_async_event_cmpl_link_speed_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
@@ -211,9 +211,9 @@ struct hwrm_async_event_cmpl_dcb_config_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
@@ -231,9 +231,9 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
@@ -258,9 +258,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
@@ -278,9 +278,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
@@ -300,9 +300,9 @@ struct hwrm_async_event_cmpl_func_drvr_unload {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
@@ -320,9 +320,9 @@ struct hwrm_async_event_cmpl_func_drvr_load {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
@@ -340,9 +340,9 @@ struct hwrm_async_event_cmpl_pf_drvr_unload {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
@@ -362,9 +362,9 @@ struct hwrm_async_event_cmpl_pf_drvr_load {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
@@ -384,9 +384,9 @@ struct hwrm_async_event_cmpl_vf_flr {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
@@ -404,9 +404,9 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
@@ -424,9 +424,9 @@ struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
@@ -443,9 +443,9 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
@@ -465,15 +465,15 @@ struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
@@ -485,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.3.0 */
+/* HW Resource Manager Specification 1.5.1 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 3
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_MINOR 5
+#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_STR "1.3.0"
+#define HWRM_VERSION_STR "1.5.1"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
@@ -556,8 +556,8 @@ struct cmd_nums {
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
- #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL)
- #define HWRM_QUEUE_BUFFERS_CFG (0x34UL)
+ #define RESERVED2 (0x33UL)
+ #define RESERVED3 (0x34UL)
#define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
#define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
#define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
@@ -574,6 +574,7 @@ struct cmd_nums {
#define HWRM_VNIC_RSS_QCFG (0x47UL)
#define HWRM_VNIC_PLCMODES_CFG (0x48UL)
#define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_VNIC_QCAPS (0x4aUL)
#define HWRM_RING_ALLOC (0x50UL)
#define HWRM_RING_FREE (0x51UL)
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
@@ -581,13 +582,15 @@ struct cmd_nums {
#define HWRM_RING_RESET (0x5eUL)
#define HWRM_RING_GRP_ALLOC (0x60UL)
#define HWRM_RING_GRP_FREE (0x61UL)
+ #define RESERVED5 (0x64UL)
+ #define RESERVED6 (0x65UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
#define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
#define HWRM_CFA_L2_FILTER_FREE (0x91UL)
#define HWRM_CFA_L2_FILTER_CFG (0x92UL)
#define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define RESERVED3 (0x94UL)
+ #define RESERVED4 (0x94UL)
#define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
#define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
#define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
@@ -607,6 +610,8 @@ struct cmd_nums {
#define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
#define HWRM_FW_RESET (0xc0UL)
#define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_FW_SET_TIME (0xc8UL)
+ #define HWRM_FW_GET_TIME (0xc9UL)
#define HWRM_EXEC_FWD_RESP (0xd0UL)
#define HWRM_REJECT_FWD_RESP (0xd1UL)
#define HWRM_FWD_RESP (0xd2UL)
@@ -615,11 +620,13 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_ALLOC (0xf0UL)
#define HWRM_WOL_FILTER_FREE (0xf1UL)
#define HWRM_WOL_FILTER_QCFG (0xf2UL)
+ #define HWRM_WOL_REASON_QCFG (0xf3UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
#define HWRM_NVM_MODIFY (0xfff4UL)
#define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
#define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
@@ -824,7 +831,9 @@ struct hwrm_ver_get_output {
u8 netctrl_fw_min;
u8 netctrl_fw_bld;
u8 netctrl_fw_rsvd;
- __le32 reserved1;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
u8 roce_fw_maj;
u8 roce_fw_min;
u8 roce_fw_bld;
@@ -839,9 +848,9 @@ struct hwrm_ver_get_output {
u8 chip_metal;
u8 chip_bond_id;
u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC (0x0UL << 0)
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA (0x1UL << 0)
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM (0x2UL << 0)
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
__le16 max_req_win_len;
__le16 max_resp_len;
__le16 def_req_timeout;
@@ -863,10 +872,10 @@ struct hwrm_func_reset_input {
#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
__le16 vf_id;
u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL (0x0UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME (0x1UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN (0x2UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF (0x3UL << 0)
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
u8 unused_0;
};
@@ -1028,6 +1037,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1047,9 +1060,8 @@ struct hwrm_func_qcaps_output {
__le32 max_mcast_filters;
__le32 max_flow_id;
__le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
u8 unused_0;
- u8 unused_1;
- u8 unused_2;
u8 valid;
};
@@ -1077,6 +1089,7 @@ struct hwrm_func_qcfg_output {
__le16 flags;
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1089,29 +1102,46 @@ struct hwrm_func_qcfg_output {
__le16 mru;
__le16 stat_ctx_id;
u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
u8 unused_0;
__le16 dflt_vnic_id;
u8 unused_1;
u8 unused_2;
__le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
- #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
u8 unused_3;
- __le16 unused_4;
+ __le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
- u8 unused_5;
- u8 unused_6;
- u8 unused_7;
+ __le16 alloc_sp_tx_rings;
+ u8 unused_4;
u8 valid;
};
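min_bw and max_bw are now self-describing words: bits 0-27 hold the value and bits 29-31 the unit, per the MASK/SFT pairs above. A sketch of decoding a response word (helper names hypothetical):

static u32 bnxt_qcfg_bw_value(__le32 raw)
{
	u32 bw = le32_to_cpu(raw);

	return (bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK) >>
	       FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT;
}

static bool bnxt_qcfg_bw_is_mbps(__le32 raw)
{
	return (le32_to_cpu(raw) & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK) ==
	       FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS;
}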
@@ -1171,18 +1201,36 @@ struct hwrm_func_cfg_input {
__le16 dflt_vlan;
__be32 dflt_ip_addr[4];
__le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
__le16 async_event_cr;
u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
u8 allowed_vlan_pris;
u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
- #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
- #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
u8 unused_2;
__le16 num_mcast_filters;
};
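The request side mirrors the same packing, so composing a bandwidth cap in Mbps is a shift-and-mask plus the unit bits. A sketch (helper name hypothetical; UNIT_MBPS is the zero encoding and is OR'ed in only for clarity):

static __le32 bnxt_pack_max_bw_mbps(u32 mbps)
{
	u32 bw = (mbps << FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT) &
		 FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK;

	return cpu_to_le32(bw | FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS);
}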
@@ -1341,16 +1389,16 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
__le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI (0x68UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 (0x73UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 (0x74UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
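The os_type enumerators likewise drop the redundant shift; registering as a Linux driver stores one constant. Sketch, assuming req points at a zeroed hwrm_func_drv_rgtr_input:

req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);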
@@ -1415,13 +1463,13 @@ struct hwrm_func_buf_rgtr_input {
__le16 vf_id;
__le16 req_buf_num_pages;
__le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
__le16 req_buf_len;
__le16 resp_buf_len;
u8 unused_0;
@@ -1473,16 +1521,16 @@ struct hwrm_func_drv_qver_output {
__le16 seq_id;
__le16 resp_len;
__le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI (0x68UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 (0x73UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 (0x74UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1528,44 +1576,44 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
__le16 port_id;
__le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
u8 auto_pause;
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
u8 unused_0;
__le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
__le16 auto_link_speed_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1582,12 +1630,12 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
u8 force_pause;
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
@@ -1641,25 +1689,25 @@ struct hwrm_port_phy_qcfg_output {
__le16 seq_id;
__le16 resp_len;
u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
u8 unused_0;
__le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
u8 duplex;
- #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL
u8 pause;
#define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
@@ -1679,39 +1727,39 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
__le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
u8 auto_pause;
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
__le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
__le16 auto_link_speed_mask;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1728,46 +1776,46 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
u8 phy_bld;
u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
u8 eee_config_phy_addr;
#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
@@ -1796,11 +1844,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
u8 link_partner_adv_pause;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
@@ -1859,7 +1907,7 @@ struct hwrm_port_mac_cfg_input {
__le64 resp_addr;
__le32 flags;
#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
@@ -1868,28 +1916,50 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
__le16 port_id;
u8 ipg;
u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0)
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
- u8 ivlan_pri2cos_map_pri;
- u8 lcos_map_pri;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
u8 tunnel_pri2cos_map_pri;
u8 dscp2pri_map_pri;
__le16 rx_ts_capture_ptp_msg_type;
__le16 tx_ts_capture_ptp_msg_type;
- __le32 unused_0;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
};
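The new cos_field_cfg byte packs three sub-fields: a VLAN PRI selector at bits 1-2, a tunnel VLAN PRI selector at bits 3-4, and the default CoS at bits 5-7. A sketch of composing it (helper name hypothetical):

static u8 bnxt_cos_field(u8 default_cos)
{
	return PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER |
	       ((default_cos << PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT) &
		PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK);
}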
/* Output (16 bytes) */
@@ -1902,9 +1972,9 @@ struct hwrm_port_mac_cfg_output {
__le16 mtu;
u8 ipg;
u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0)
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0)
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
u8 unused_0;
u8 valid;
};
@@ -2163,8 +2233,8 @@ struct hwrm_queue_qportcfg_input {
__le64 resp_addr;
__le32 flags;
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
__le16 unused_0;
@@ -2179,50 +2249,51 @@ struct hwrm_queue_qportcfg_output {
u8 max_configurable_queues;
u8 max_configurable_lossless_queues;
u8 queue_cfg_allowed;
- u8 queue_buffers_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
u8 queue_pfcenable_cfg_allowed;
u8 queue_pri2cos_cfg_allowed;
u8 queue_cos2bw_cfg_allowed;
u8 queue_id0;
u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id1;
u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id2;
u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id3;
u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id4;
u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id5;
u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id6;
u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id7;
u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 valid;
};
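queue_buffers_cfg_allowed is repurposed as queue_cfg_info, whose single defined bit reports whether queue configuration is asymmetric between TX and RX. Sketch (helper name hypothetical; the field is a plain u8, so no byte swap is needed):

static bool bnxt_queue_cfg_asym(const struct hwrm_queue_qportcfg_output *resp)
{
	return resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG;
}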
@@ -2235,19 +2306,21 @@ struct hwrm_queue_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
__le32 enables;
#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
__le32 queue_id;
__le32 dflt_len;
u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 unused_0[7];
};
@@ -2264,50 +2337,6 @@ struct hwrm_queue_cfg_output {
u8 valid;
};
-/* hwrm_queue_buffers_cfg */
-/* Input (56 bytes) */
-struct hwrm_queue_buffers_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
- __le32 enables;
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x4UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x8UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x10UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x20UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x40UL
- __le32 queue_id;
- __le32 reserved;
- __le32 shared;
- __le32 xoff;
- __le32 xon;
- __le32 full;
- __le32 notfull;
- __le32 max;
-};
-
-/* Output (16 bytes) */
-struct hwrm_queue_buffers_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_queue_pfcenable_cfg */
/* Input (24 bytes) */
struct hwrm_queue_pfcenable_cfg_input {
@@ -2351,12 +2380,22 @@ struct hwrm_queue_pri2cos_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
__le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
u8 port_id;
u8 pri0_cos_queue_id;
u8 pri1_cos_queue_id;
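With the widened PATH field and the per-priority enables bits, one request can map selected priorities in both directions. A sketch of filling the control words (the queue ids are illustrative; real ids would come from hwrm_queue_qportcfg):

req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR);
req->enables = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID |
			   QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID);
req->pri0_cos_queue_id = 0;	/* illustrative queue ids */
req->pri1_cos_queue_id = 1;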
@@ -2404,82 +2443,226 @@ struct hwrm_queue_cos2bw_cfg_input {
u8 queue_id0;
u8 unused_0;
__le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
u8 queue_id1;
__le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id1_pri_lvl;
u8 queue_id1_bw_weight;
u8 queue_id2;
__le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id2_pri_lvl;
u8 queue_id2_bw_weight;
u8 queue_id3;
__le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id3_pri_lvl;
u8 queue_id3_bw_weight;
u8 queue_id4;
__le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id4_pri_lvl;
u8 queue_id4_bw_weight;
u8 queue_id5;
__le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id5_pri_lvl;
u8 queue_id5_bw_weight;
u8 queue_id6;
__le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id6_pri_lvl;
u8 queue_id6_bw_weight;
u8 queue_id7;
__le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_1[5];
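Each of the eight per-queue blocks repeats the same min/max bandwidth packing plus a TSA selector. A sketch of an ETS assignment for queue 0 (the 25 and 100 are illustrative values; weight semantics follow IEEE 802.1Qaz):

req->queue_id0_tsa_assign = QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS;
req->queue_id0_bw_weight = 25;	/* illustrative ETS weight */
req->queue_id0_min_bw =
	cpu_to_le32(QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS |
		    (100 << QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT));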
@@ -2563,6 +2746,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2615,18 +2799,18 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
__le16 vnic_id;
__le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
__le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
u8 unused_0;
u8 unused_1;
__le32 max_agg_timer;
@@ -2780,15 +2964,15 @@ struct hwrm_ring_alloc_input {
__le64 resp_addr;
__le32 enables;
#define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED2 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
#define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
#define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 unused_1;
__le64 page_tbl_addr;
@@ -2804,18 +2988,36 @@ struct hwrm_ring_alloc_input {
u8 unused_4;
u8 unused_5;
__le32 reserved1;
- __le16 reserved2;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
u8 unused_6;
u8 unused_7;
__le32 reserved3;
__le32 stat_ctx_id;
__le32 reserved4;
__le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_RSVD 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_RSVD (0x1UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
u8 unused_8[3];
};
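The new ring_arb_cfg and max_bw words follow this header's usual MASK/SFT convention: shift the value into place, mask it, then OR in any enumerated policy bits and set the matching enables flag. A minimal sketch, assuming req points at a zeroed hwrm_ring_alloc_input (the helper name is hypothetical):

static void example_ring_arb_wfq(struct hwrm_ring_alloc_input *req, u8 weight)
{
	u16 arb = RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ;

	/* the policy parameter (e.g. a WFQ weight) lives in bits 15:8 */
	arb |= (weight << RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT) &
	       RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK;
	req->ring_arb_cfg = cpu_to_le16(arb);
	req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_RING_ARB_CFG);
}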
@@ -2842,9 +3044,9 @@ struct hwrm_ring_free_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -2942,9 +3144,9 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -3068,36 +3270,36 @@ struct hwrm_cfa_l2_filter_alloc_input {
__le16 t_l2_ivlan;
__le16 t_l2_ivlan_mask;
u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
u8 unused_6;
__le32 src_id;
u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_7;
__le16 dst_id;
__le16 mirror_vnic_id;
u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
u8 unused_8;
__le32 unused_9;
__le64 l2_filter_id_hint;
@@ -3246,16 +3448,16 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
u8 l3_addr_type;
u8 t_l3_addr_type;
u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_0;
__le32 vni;
__le32 dst_vnic_id;
@@ -3311,14 +3513,14 @@ struct hwrm_cfa_encap_record_alloc_input {
__le32 flags;
#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
u8 unused_0;
__le16 unused_1;
__le32 encap_data[16];
@@ -3397,32 +3599,32 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
u8 src_macaddr[6];
__be16 ethertype;
u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 (0x4UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP (0x6UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP (0x11UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE (0x1UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW (0x2UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST (0x3UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
__be32 src_ipaddr[4];
__be32 src_ipaddr_mask[4];
__be32 dst_ipaddr[4];
@@ -3511,8 +3713,8 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0[7];
};
@@ -3539,8 +3741,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0;
__be16 tunnel_dst_port_val;
__le32 unused_1;
@@ -3570,8 +3772,8 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0;
__le16 tunnel_dst_port_id;
__le32 unused_1;
@@ -3720,15 +3922,15 @@ struct hwrm_fw_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
__le16 unused_0[3];
};
@@ -3739,9 +3941,9 @@ struct hwrm_fw_reset_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
u8 unused_0;
__le16 unused_1;
u8 unused_2;
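selfrst_status tells the host which kind of self-reset, if any, the firmware will perform after the command. A hedged sketch of interpreting the response (resp is assumed to point at the completed hwrm_fw_reset_output):

static int example_handle_selfrst(struct hwrm_fw_reset_output *resp)
{
	switch (resp->selfrst_status) {
	case FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE:
		return 0;	/* no self-reset pending */
	case FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP:
		return 1;	/* firmware resets itself shortly */
	case FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST:
		return 2;	/* reset deferred to the next PCIe reset */
	default:
		return -EINVAL;
	}
}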
@@ -3759,11 +3961,11 @@ struct hwrm_fw_qstatus_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
u8 unused_0[7];
};
@@ -3774,9 +3976,9 @@ struct hwrm_fw_qstatus_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
u8 unused_0;
__le16 unused_1;
u8 unused_2;
@@ -3785,6 +3987,42 @@ struct hwrm_fw_qstatus_output {
u8 valid;
};
+/* hwrm_fw_set_time */
+/* Input (32 bytes) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
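A hedged sketch of pushing the host clock into the NIC with this new command. It assumes an HWRM_FW_SET_TIME request-type constant exists alongside these structures (not shown in this hunk) and reuses the send helpers visible in bnxt_sriov.c below:

static int example_fw_set_time(struct bnxt *bp)
{
	struct hwrm_fw_set_time_input req = {0};
	struct tm tm;

	time64_to_tm(ktime_get_real_seconds(), 0, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(tm.tm_year + 1900);
	req.month = tm.tm_mon + 1;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	req.zone = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}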
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
@@ -3921,32 +4159,6 @@ struct hwrm_temp_monitor_query_output {
u8 valid;
};
-/* hwrm_nvm_raw_write_blk */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_write_blk_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le32 dest_addr;
- __le32 len;
-};
-
-/* Output (16 bytes) */
-struct hwrm_nvm_raw_write_blk_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_nvm_read */
/* Input (40 bytes) */
struct hwrm_nvm_read_input {
@@ -4132,9 +4344,9 @@ struct hwrm_nvm_find_dir_entry_input {
u8 opt_ordinal;
#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0)
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0)
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
u8 unused_1[3];
};
@@ -4266,4 +4478,41 @@ struct hwrm_nvm_verify_update_output {
u8 valid;
};
+/* hwrm_nvm_install_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
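A hedged sketch of driving the new install-update command and checking its result, again assuming the HWRM_NVM_INSTALL_UPDATE request type exists and reusing the locked send pattern from bnxt_sriov.c below:

static int example_nvm_install_all(struct bnxt *bp)
{
	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_nvm_install_update_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	req.install_type = cpu_to_le32(NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc && resp->result != NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS)
		rc = -EIO;	/* problem_item names the failing package */
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}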
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 50d2007a2640..ec6cd18842c3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -19,6 +19,45 @@
#include "bnxt_ethtool.h"
#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+ struct bnxt_vf_info *vf, u16 event_id)
+{
+ struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_fwd_async_event_cmpl_input req = {0};
+ struct hwrm_async_event_cmpl *async_cmpl;
+ int rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+ if (vf)
+ req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+ else
+ /* broadcast this async event to all VFs */
+ req.encap_async_event_target_id = cpu_to_le16(0xffff);
+ async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+ async_cmpl->type =
+ cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+ async_cmpl->event_id = cpu_to_le16(event_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+ rc);
+ goto fwd_async_event_cmpl_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_async_event_cmpl_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
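Passing vf == NULL selects the 0xffff broadcast target id, so one call can notify every VF at once; for example:

	/* NULL vf selects the 0xffff broadcast target id */
	bnxt_hwrm_fwd_async_event_cmpl(bp, NULL,
		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);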
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
@@ -135,7 +174,8 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
@@ -146,6 +186,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
@@ -243,8 +286,9 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
rc = -EINVAL;
break;
}
- /* CHIMP TODO: send msg to VF to update new link state */
-
+ if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
}
@@ -525,46 +569,6 @@ err_out1:
return rc;
}
-static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
- struct bnxt_vf_info *vf,
- u16 event_id)
-{
- int rc = 0;
- struct hwrm_fwd_async_event_cmpl_input req = {0};
- struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_async_event_cmpl *async_cmpl;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
- if (vf)
- req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
- else
- /* broadcast this async event to all VFs */
- req.encap_async_event_target_id = cpu_to_le16(0xffff);
- async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
- async_cmpl->type =
- cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
- async_cmpl->event_id = cpu_to_le16(event_id);
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
- rc);
- goto fwd_async_event_cmpl_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_async_event_cmpl_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
-}
-
void bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 0392670ab49c..1ab72e4820af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -12,7 +12,7 @@
int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
int bnxt_set_vf_mac(struct net_device *, int, u8 *);
-int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int bnxt_set_vf_bw(struct net_device *, int, int, int);
int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 46f904392f88..4464bc5db934 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -450,28 +450,32 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
-static int bcmgenet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcmgenet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
if (!netif_running(dev))
return -EINVAL;
- if (!dev->phydev)
+ if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_gset(dev->phydev, cmd);
+ return phy_ethtool_ksettings_get(priv->phydev, cmd);
}
-static int bcmgenet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcmgenet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
if (!netif_running(dev))
return -EINVAL;
- if (!dev->phydev)
+ if (!priv->phydev)
return -ENODEV;
- return phy_ethtool_sset(dev->phydev, cmd);
+ return phy_ethtool_ksettings_set(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
@@ -937,7 +941,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
e->eee_active = p->eee_active;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
- return phy_ethtool_get_eee(dev->phydev, e);
+ return phy_ethtool_get_eee(priv->phydev, e);
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -954,7 +958,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false);
} else {
- ret = phy_init_eee(dev->phydev, 0);
+ ret = phy_init_eee(priv->phydev, 0);
if (ret) {
netif_err(priv, hw, dev, "EEE initialization failed\n");
return ret;
@@ -964,12 +968,14 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
bcmgenet_eee_enable_set(dev, true);
}
- return phy_ethtool_set_eee(dev->phydev, e);
+ return phy_ethtool_set_eee(priv->phydev, e);
}
static int bcmgenet_nway_reset(struct net_device *dev)
{
- return genphy_restart_aneg(dev->phydev);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return genphy_restart_aneg(priv->phydev);
}
/* standard ethtool support functions. */
@@ -977,8 +983,6 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
- .get_settings = bcmgenet_get_settings,
- .set_settings = bcmgenet_set_settings,
.get_drvinfo = bcmgenet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = bcmgenet_get_msglevel,
@@ -990,19 +994,20 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.nway_reset = bcmgenet_nway_reset,
.get_coalesce = bcmgenet_get_coalesce,
.set_coalesce = bcmgenet_set_coalesce,
+ .get_link_ksettings = bcmgenet_get_link_ksettings,
+ .set_link_ksettings = bcmgenet_set_link_ksettings,
};
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
- struct net_device *ndev = priv->dev;
int ret = 0;
u32 reg;
switch (mode) {
case GENET_POWER_CABLE_SENSE:
- phy_detach(ndev->phydev);
+ phy_detach(priv->phydev);
break;
case GENET_POWER_WOL_MAGIC:
@@ -1063,6 +1068,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
int val = 0;
if (!netif_running(dev))
@@ -1072,10 +1078,10 @@ static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!dev->phydev)
+ if (!priv->phydev)
val = -ENODEV;
else
- val = phy_mii_ioctl(dev->phydev, rq, cmd);
+ val = phy_mii_ioctl(priv->phydev, rq, cmd);
break;
default:
@@ -2458,7 +2464,6 @@ static void bcmgenet_irq_task(struct work_struct *work)
{
struct bcmgenet_priv *priv = container_of(
work, struct bcmgenet_priv, bcmgenet_irq_work);
- struct net_device *ndev = priv->dev;
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
@@ -2471,7 +2476,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
/* Link UP/DOWN event */
if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
- phy_mac_interrupt(ndev->phydev,
+ phy_mac_interrupt(priv->phydev,
!!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
}
@@ -2664,128 +2669,6 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
- u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- return !!(reg & (1 << (f_index % 32)));
-}
-
-static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
-}
-
-static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
- u32 f_index, u32 rx_queue)
-{
- u32 offset;
- u32 reg;
-
- offset = f_index / 8;
- reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
- reg &= ~(0xF << (4 * (f_index % 8)));
- reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
- bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
-}
-
-static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
- u32 f_index, u32 f_length)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg &= ~(0xFF << (8 * (f_index % 4)));
- reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
-}
-
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
- u32 f_index;
-
- for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
- if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
- return f_index;
-
- return -ENOMEM;
-}
-
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit 19 - nibble 0 match enable
- * bit 18 - nibble 1 match enable
- * bit 17 - nibble 2 match enable
- * bit 16 - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8 - nibble 1 data
- * bits 7:4 - nibble 2 data
- * bits 3:0 - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
- u32 f_length, u32 rx_queue)
-{
- int f_index;
- u32 i;
-
- f_index = bcmgenet_hfb_find_unused_filter(priv);
- if (f_index < 0)
- return -ENOMEM;
-
- if (f_length > priv->hw_params->hfb_filter_size)
- return -EINVAL;
-
- for (i = 0; i < f_length; i++)
- bcmgenet_hfb_writel(priv, f_data[i],
- (f_index * priv->hw_params->hfb_filter_size + i) *
- sizeof(u32));
-
- bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
- bcmgenet_hfb_enable_filter(priv, f_index);
- bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
-
- return 0;
-}
-
/* bcmgenet_hfb_clear
*
* Clear Hardware Filter Block and disable all filtering.
@@ -2833,7 +2716,7 @@ static void bcmgenet_netif_start(struct net_device *dev)
/* Monitor link interrupts now */
bcmgenet_link_intr_enable(priv);
- phy_start(dev->phydev);
+ phy_start(priv->phydev);
}
static int bcmgenet_open(struct net_device *dev)
@@ -2932,7 +2815,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
netif_tx_stop_all_queues(dev);
- phy_stop(dev->phydev);
+ phy_stop(priv->phydev);
bcmgenet_intr_disable(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_disable_tx_napi(priv);
@@ -2958,7 +2841,7 @@ static int bcmgenet_close(struct net_device *dev)
bcmgenet_netif_stop(dev);
/* Really kill the PHY state machine and disconnect from it */
- phy_disconnect(dev->phydev);
+ phy_disconnect(priv->phydev);
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -3517,7 +3400,7 @@ static int bcmgenet_suspend(struct device *d)
bcmgenet_netif_stop(dev);
- phy_suspend(dev->phydev);
+ phy_suspend(priv->phydev);
netif_device_detach(dev);
@@ -3581,7 +3464,7 @@ static int bcmgenet_resume(struct device *d)
if (priv->wolopts)
clk_disable_unprepare(priv->clk_wol);
- phy_init_hw(dev->phydev);
+ phy_init_hw(priv->phydev);
/* Speed settings must be restored */
bcmgenet_mii_config(priv->dev);
@@ -3614,7 +3497,7 @@ static int bcmgenet_resume(struct device *d)
netif_device_attach(dev);
- phy_resume(dev->phydev);
+ phy_resume(priv->phydev);
if (priv->eee.eee_enabled)
bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0f0868c56f05..1e2dc34d331a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -597,6 +597,7 @@ struct bcmgenet_priv {
/* MDIO bus variables */
wait_queue_head_t wq;
+ struct phy_device *phydev;
bool internal_phy;
struct device_node *phy_dn;
struct device_node *mdio_dn;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e907acd81da9..457c3bc8cfff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -86,7 +86,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
void bcmgenet_mii_setup(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
+ struct phy_device *phydev = priv->phydev;
u32 reg, cmd_bits = 0;
bool status_changed = false;
@@ -183,9 +183,9 @@ void bcmgenet_mii_reset(struct net_device *dev)
if (GENET_IS_V4(priv))
return;
- if (dev->phydev) {
- phy_init_hw(dev->phydev);
- phy_start_aneg(dev->phydev);
+ if (priv->phydev) {
+ phy_init_hw(priv->phydev);
+ phy_start_aneg(priv->phydev);
}
}
@@ -236,7 +236,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- struct net_device *ndev = priv->dev;
u32 reg;
/* Speed settings are set in bcmgenet_mii_setup() */
@@ -245,14 +244,14 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
- fixed_phy_set_link_update(ndev->phydev,
+ fixed_phy_set_link_update(priv->phydev,
bcmgenet_fixed_phy_link_update);
}
int bcmgenet_mii_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
+ struct phy_device *phydev = priv->phydev;
struct device *kdev = &priv->pdev->dev;
const char *phy_name = NULL;
u32 id_mode_dis = 0;
@@ -303,7 +302,7 @@ int bcmgenet_mii_config(struct net_device *dev)
* capabilities, use that knowledge to also configure the
* Reverse MII interface correctly.
*/
- if ((phydev->supported & PHY_BASIC_FEATURES) ==
+ if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
PHY_BASIC_FEATURES)
port_ctrl = PORT_MODE_EXT_RVMII_25;
else
@@ -372,7 +371,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
return -ENODEV;
}
} else {
- phydev = dev->phydev;
+ phydev = priv->phydev;
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
@@ -383,6 +382,8 @@ int bcmgenet_mii_probe(struct net_device *dev)
}
}
+ priv->phydev = phydev;
+
	/* Configure the port multiplexer based on the probed PHY device,
	 * since reading the 'max-speed' property determines the maximum
	 * supported PHY speed, which bcmgenet_mii_config() needs to
	 * configure things appropriately.
	 */
*/
ret = bcmgenet_mii_config(dev);
if (ret) {
- phy_disconnect(phydev);
+ phy_disconnect(priv->phydev);
return ret;
}
@@ -400,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* Ethernet MAC ISRs
*/
if (priv->internal_phy)
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ priv->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
}
@@ -605,6 +606,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
}
+ priv->phydev = phydev;
priv->phy_interface = pd->phy_interface;
return 0;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a2551bcd1027..a927a730da10 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12079,95 +12079,107 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
return ret;
}
-static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tg3_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct tg3 *tp = netdev_priv(dev);
+ u32 supported, advertising;
if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
- cmd->supported = (SUPPORTED_Autoneg);
+ supported = (SUPPORTED_Autoneg);
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
- cmd->supported |= (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full);
+ supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
- cmd->supported |= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_TP);
- cmd->port = PORT_TP;
+ supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP);
+ cmd->base.port = PORT_TP;
} else {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
- cmd->advertising = tp->link_config.advertising;
+ advertising = tp->link_config.advertising;
if (tg3_flag(tp, PAUSE_AUTONEG)) {
if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
- cmd->advertising |= ADVERTISED_Pause;
+ advertising |= ADVERTISED_Pause;
} else {
- cmd->advertising |= ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
}
} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
- cmd->advertising |= ADVERTISED_Asym_Pause;
+ advertising |= ADVERTISED_Asym_Pause;
}
}
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
if (netif_running(dev) && tp->link_up) {
- ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
- cmd->duplex = tp->link_config.active_duplex;
- cmd->lp_advertising = tp->link_config.rmt_adv;
+ cmd->base.speed = tp->link_config.active_speed;
+ cmd->base.duplex = tp->link_config.active_duplex;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.lp_advertising,
+ tp->link_config.rmt_adv);
+
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
else
- cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
}
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
- }
- cmd->phy_address = tp->phy_addr;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = tp->link_config.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ }
+ cmd->base.phy_address = tp->phy_addr;
+ cmd->base.autoneg = tp->link_config.autoneg;
return 0;
}
-static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int tg3_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct tg3 *tp = netdev_priv(dev);
- u32 speed = ethtool_cmd_speed(cmd);
+ u32 speed = cmd->base.speed;
+ u32 advertising;
if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
- if (cmd->autoneg != AUTONEG_ENABLE &&
- cmd->autoneg != AUTONEG_DISABLE)
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_DISABLE &&
- cmd->duplex != DUPLEX_FULL &&
- cmd->duplex != DUPLEX_HALF)
+ if (cmd->base.autoneg == AUTONEG_DISABLE &&
+ cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_HALF)
return -EINVAL;
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
u32 mask = ADVERTISED_Autoneg |
ADVERTISED_Pause |
ADVERTISED_Asym_Pause;
@@ -12185,7 +12197,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else
mask |= ADVERTISED_FIBRE;
- if (cmd->advertising & ~mask)
+ if (advertising & ~mask)
return -EINVAL;
mask &= (ADVERTISED_1000baseT_Half |
@@ -12195,13 +12207,13 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full);
- cmd->advertising &= mask;
+ advertising &= mask;
} else {
if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
if (speed != SPEED_1000)
return -EINVAL;
- if (cmd->duplex != DUPLEX_FULL)
+ if (cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else {
if (speed != SPEED_100 &&
@@ -12212,16 +12224,16 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
tg3_full_lock(tp, 0);
- tp->link_config.autoneg = cmd->autoneg;
- if (cmd->autoneg == AUTONEG_ENABLE) {
- tp->link_config.advertising = (cmd->advertising |
+ tp->link_config.autoneg = cmd->base.autoneg;
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ tp->link_config.advertising = (advertising |
ADVERTISED_Autoneg);
tp->link_config.speed = SPEED_UNKNOWN;
tp->link_config.duplex = DUPLEX_UNKNOWN;
} else {
tp->link_config.advertising = 0;
tp->link_config.speed = speed;
- tp->link_config.duplex = cmd->duplex;
+ tp->link_config.duplex = cmd->base.duplex;
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -14094,8 +14106,6 @@ static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
}
static const struct ethtool_ops tg3_ethtool_ops = {
- .get_settings = tg3_get_settings,
- .set_settings = tg3_set_settings,
.get_drvinfo = tg3_get_drvinfo,
.get_regs_len = tg3_get_regs_len,
.get_regs = tg3_get_regs,
@@ -14128,6 +14138,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_ts_info = tg3_get_ts_info,
.get_eee = tg3_get_eee,
.set_eee = tg3_set_eee,
+ .get_link_ksettings = tg3_get_link_ksettings,
+ .set_link_ksettings = tg3_set_link_ksettings,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
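Both converted handlers lean on the ethtool conversion helpers for the legacy u32 bitmaps; a minimal round-trip sketch, assuming only linux/ethtool.h:

static bool example_link_mode_round_trip(void)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
	u32 legacy;

	/* legacy u32 -> link-mode bitmap, as in tg3_get_link_ksettings() */
	ethtool_convert_legacy_u32_to_link_mode(modes,
			SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full);

	/* link-mode bitmap -> legacy u32, as in tg3_set_link_ksettings();
	 * returns false if the bitmap holds modes a u32 cannot express
	 */
	return ethtool_convert_link_mode_to_legacy_u32(&legacy, modes);
}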
@@ -18122,14 +18134,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- /* We needn't recover from permanent error */
- if (state == pci_channel_io_frozen)
- tp->pcierr_recovery = true;
-
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
goto done;
+ /* We needn't recover from permanent error */
+ if (state == pci_channel_io_frozen)
+ tp->pcierr_recovery = true;
+
tg3_phy_stop(tp);
tg3_netif_stop(tp);
@@ -18226,7 +18238,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
rtnl_lock();
- if (!netif_running(netdev))
+ if (!netdev || !netif_running(netdev))
goto done;
tg3_full_lock(tp, 0);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 0e4fdc3dd729..31f61a744d66 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -31,15 +31,10 @@
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
-#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5
-#define BNAD_ETHTOOL_STATS_NUM \
- (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
- sizeof(struct bnad_drv_stats) / sizeof(u64) + \
- offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-
-static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_dropped",
"multicast",
"collisions",
-
"rx_length_errors",
- "rx_over_errors",
"rx_crc_errors",
"rx_frame_errors",
- "rx_fifo_errors",
- "rx_missed_errors",
-
- "tx_aborted_errors",
- "tx_carrier_errors",
"tx_fifo_errors",
- "tx_heartbeat_errors",
- "tx_window_errors",
-
- "rx_compressed",
- "tx_compressed",
"netif_queue_stop",
"netif_queue_wakeup",
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"fc_tx_fid_parity_errors",
};
+#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
+
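Deriving the count from the string table itself keeps the strings and the values filled in bnad_get_ethtool_stats() in lock-step. A hedged sketch of the sset-count side that consumes it (the handler name is hypothetical):

static int example_get_sset_count(struct net_device *netdev, int stringset)
{
	/* n_stats now tracks the string table automatically */
	return stringset == ETH_SS_STATS ? BNAD_ETHTOOL_STATS_NUM :
					   -EOPNOTSUPP;
}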
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_allocbuf_failed", q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index", q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_consumer_index", q_num);
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
sprintf(string, "rxq%d_allocbuf_failed",
q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index",
q_num);
string += ETH_GSTRING_LEN;
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
u64 *buf)
{
struct bnad *bnad = netdev_priv(netdev);
- int i, j, bi;
+ int i, j, bi = 0;
unsigned long flags;
- struct rtnl_link_stats64 *net_stats64;
+ struct rtnl_link_stats64 net_stats64;
u64 *stats64;
u32 bmap;
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
* under the same lock
*/
spin_lock_irqsave(&bnad->bna_lock, flags);
- bi = 0;
- memset(buf, 0, stats->n_stats * sizeof(u64));
-
- net_stats64 = (struct rtnl_link_stats64 *)buf;
- bnad_netdev_qstats_fill(bnad, net_stats64);
- bnad_netdev_hwstats_fill(bnad, net_stats64);
- bi = sizeof(*net_stats64) / sizeof(u64);
+ memset(&net_stats64, 0, sizeof(net_stats64));
+ bnad_netdev_qstats_fill(bnad, &net_stats64);
+ bnad_netdev_hwstats_fill(bnad, &net_stats64);
+
+ buf[bi++] = net_stats64.rx_packets;
+ buf[bi++] = net_stats64.tx_packets;
+ buf[bi++] = net_stats64.rx_bytes;
+ buf[bi++] = net_stats64.tx_bytes;
+ buf[bi++] = net_stats64.rx_errors;
+ buf[bi++] = net_stats64.tx_errors;
+ buf[bi++] = net_stats64.rx_dropped;
+ buf[bi++] = net_stats64.tx_dropped;
+ buf[bi++] = net_stats64.multicast;
+ buf[bi++] = net_stats64.collisions;
+ buf[bi++] = net_stats64.rx_length_errors;
+ buf[bi++] = net_stats64.rx_crc_errors;
+ buf[bi++] = net_stats64.rx_frame_errors;
+ buf[bi++] = net_stats64.tx_fifo_errors;
/* Get netif_queue_stopped from stack */
bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 18d12d35039a..30426109711c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -305,7 +305,7 @@ struct nicvf {
bool msix_enabled;
u8 num_vec;
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
- char irq_name[NIC_VF_MSIX_VECTORS][20];
+ char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
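The old fixed 20-byte name could truncate once the interface name is embedded; IFNAMSIZ + 15 leaves room for a full netdev name plus a queue suffix. A hedged sketch of the naming this sizing anticipates (the format string is illustrative):

	/* worst case: IFNAMSIZ - 1 bytes of netdev name + "-rxtx-%d" */
	sprintf(nic->irq_name[irq], "%s-rxtx-%d", nic->netdev->name, qidx);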
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 7d00162a2f89..45a13f718863 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -516,7 +516,8 @@ static int nicvf_init_resources(struct nicvf *nic)
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cmp_queue *cq,
struct cqe_send_t *cqe_tx,
- int cqe_type, int budget)
+ int cqe_type, int budget,
+ unsigned int *tx_pkts, unsigned int *tx_bytes)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
@@ -547,6 +548,8 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
+ (*tx_pkts)++;
+ *tx_bytes += skb->len;
napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
@@ -662,6 +665,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
+ unsigned int tx_pkts = 0, tx_bytes = 0;
spin_lock_bh(&cq->lock);
loop:
@@ -701,7 +705,7 @@ loop:
case CQE_TYPE_SEND:
nicvf_snd_pkt_handler(netdev, cq,
(void *)cq_desc, CQE_TYPE_SEND,
- budget);
+ budget, &tx_pkts, &tx_bytes);
tx_done++;
break;
case CQE_TYPE_INVALID:
@@ -730,6 +734,9 @@ done:
netdev = nic->pnicvf->netdev;
txq = netdev_get_tx_queue(netdev,
nicvf_netdev_qidx(nic, cq_idx));
+ if (tx_pkts)
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
@@ -1160,6 +1167,9 @@ int nicvf_stop(struct net_device *netdev)
netif_tx_disable(netdev);
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
/* Free resources */
nicvf_config_data_transfer(nic, false);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 178c5c7b0994..a4fc50155881 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1082,6 +1082,24 @@ static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
imm->len = 1;
}
+static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
+ int sq_num, int desc_cnt)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(nic->pnicvf->netdev,
+ skb_get_queue_mapping(skb));
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, desc_cnt);
+}
+
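nicvf_sq_doorbell() is the "sent" half of Byte Queue Limits accounting; this patch pairs it with the "completed" and "reset" calls added elsewhere in the file. A hedged sketch of the invariant:

static void example_bql_lifecycle(struct netdev_queue *txq,
				  struct sk_buff *skb,
				  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_sent_queue(txq, skb->len);	/* xmit: nicvf_sq_doorbell() */
	netdev_tx_completed_queue(txq, pkts, bytes); /* napi: cq handler */
	netdev_tx_reset_queue(txq);		/* teardown: nicvf_stop() */
}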
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1141,12 +1159,8 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Save SKB in the last segment for freeing */
sq->skbuff[hdr_qentry] = (u64)skb;
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
- /* Inform HW to xmit all TSO segments */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, desc_cnt);
nic->drv_stats.tx_tso++;
return 1;
}
@@ -1219,12 +1233,8 @@ doorbell:
nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
}
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
- /* Inform HW to xmit new packet */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, subdesc_cnt);
return 1;
append_fail:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 246129650967..c6b71f656992 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 45955691edc7..28e653e9c856 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -422,8 +422,8 @@ struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
unsigned short lp_advertising; /* peer advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
@@ -437,11 +437,6 @@ enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
MAX_OFLD_QSETS = 16, /* # of offload Tx, iscsi Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
- MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
- MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
-
- /* # of streaming iSCSIT Rx queues */
- MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
};
enum {
@@ -458,8 +453,7 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
- MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
- MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
+ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};
struct adapter;
@@ -704,10 +698,6 @@ struct sge {
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
- struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
- struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
- struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
- struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_uld_rxq_info **uld_rxq_info;
@@ -717,15 +707,8 @@ struct sge {
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
- u16 iscsiqsets; /* # of active iSCSI queue sets */
- u16 niscsitq; /* # of available iSCST Rx queues */
- u16 rdmaqs; /* # of available RDMA Rx queues */
- u16 rdmaciqs; /* # of available RDMA concentrator IQs */
+ u16 ofldqsets; /* # of active ofld queue sets */
u16 nqs_per_uld; /* # of Rx queues per ULD */
- u16 iscsi_rxq[MAX_OFLD_QSETS];
- u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
- u16 rdma_rxq[MAX_RDMA_QUEUES];
- u16 rdma_ciq[MAX_RDMA_CIQS];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
@@ -749,10 +732,7 @@ struct sge {
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
-#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
-#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
-#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
-#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
+#define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
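The four per-protocol iterators collapse into this single offload walker; code that used to loop over iscsi and rdma queue arrays separately now makes one pass. A hedged sketch (the helper name is hypothetical):

	int i;

	/* e.g. quiescing every offload Tx queue in one pass */
	for_each_ofldtxq(&adap->sge, i)
		example_stop_txq(adap, &adap->sge.ofldtxq[i]);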
struct l2t_data;
@@ -786,6 +766,7 @@ struct uld_msix_bmap {
struct uld_msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
+ unsigned int idx;
};
struct vf_info {
@@ -818,7 +799,7 @@ struct adapter {
} msix_info[MAX_INGQ + 1];
struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
- unsigned int msi_idx;
+ int msi_idx;
struct doorbell_stats db_stats;
struct sge sge;
@@ -836,9 +817,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
- struct cxgb4_pci_uld_info *uld;
+ struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
+ unsigned int num_ofld_uld;
struct list_head list_node;
struct list_head rcu_node;
struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
@@ -858,6 +840,8 @@ struct adapter {
#define T4_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;
+ struct mutex uld_mutex;
+
struct dentry *debugfs_root;
bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -867,6 +851,9 @@ struct adapter {
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
+
+ /* TC u32 offload */
+ struct cxgb4_tc_u32_table *tc_u32;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1041,6 +1028,32 @@ enum {
VLAN_REWRITE
};
+/* Host shadow copy of an ingress filter entry. This is in host-native
+ * format and doesn't match the ordering, bit order, etc. of the hardware
+ * or the firmware command. The bit-field structure elements are purely a
+ * reminder of the field size limitations, and save memory when the filter
+ * table is large.
+ */
+struct filter_entry {
+ /* Administrative fields for filter. */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+
+ u32 pending:1; /* filter action is pending firmware reply */
+ u32 smtidx:8; /* Source MAC Table index for smac */
+ struct filter_ctx *ctx; /* Caller's completion hook */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+ struct net_device *dev; /* Associated net device */
+ u32 tid; /* This will store the actual tid */
+
+ /* The filter itself. Most of this is a straight copy of information
+ * provided by the extended ioctl(). Some fields are translated to
+ * internal forms -- for instance the Ingress Queue ID passed in from
+ * the ioctl() is translated into the Absolute Ingress Queue ID.
+ */
+ struct ch_filter_specification fs;
+};
+
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
@@ -1051,6 +1064,11 @@ static inline int is_pci_uld(const struct adapter *adap)
return adap->params.crypto;
}
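+
+/* True if any upper-layer driver (offload or crypto) may attach to this
+ * adapter.
+ */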
+static inline int is_uld(const struct adapter *adap)
+{
+ return (adap->params.offload || adap->params.crypto);
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -1277,6 +1295,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid);
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+ unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
@@ -1635,7 +1655,9 @@ void t4_idma_monitor(struct adapter *adapter,
int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
unsigned int naddr, u8 *addr);
-void uld_mem_free(struct adapter *adap);
-int uld_mem_alloc(struct adapter *adap);
+void t4_uld_mem_free(struct adapter *adap);
+int t4_uld_mem_alloc(struct adapter *adap);
+void t4_uld_clean_up(struct adapter *adap);
+void t4_register_netevent_notifier(void);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 91fb50850fff..20455d082cb8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2432,17 +2432,11 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
- int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
- int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
- int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
+ int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
- int iscsi_idx = r - eth_entries;
- int iscsit_idx = iscsi_idx - iscsi_entries;
- int rdma_idx = iscsit_idx - iscsit_entries;
- int ciq_idx = rdma_idx - rdma_entries;
- int ctrl_idx = ciq_idx - ciq_entries;
+ int ofld_idx = r - eth_entries;
+ int ctrl_idx = ofld_idx - ofld_entries;
int fq_idx = ctrl_idx - ctrl_entries;
if (r)
@@ -2518,119 +2512,17 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (iscsi_idx < iscsi_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.iscsirxq[iscsi_idx * 4];
+ } else if (ofld_idx < ofld_entries) {
const struct sge_ofld_txq *tx =
- &adap->sge.ofldtxq[iscsi_idx * 4];
- int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx);
+ &adap->sge.ofldtxq[ofld_idx * 4];
+ int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);
- S("QType:", "iSCSI");
+ S("QType:", "OFLD-Txq");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (iscsit_idx < iscsit_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.iscsitrxq[iscsit_idx * 4];
- int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
-
- S("QType:", "iSCSIT");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (rdma_idx < rdma_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.rdmarxq[rdma_idx * 4];
- int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
-
- S("QType:", "RDMA-CPL");
- S("Interface:",
- rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (ciq_idx < ciq_entries) {
- const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
- int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
-
- S("QType:", "RDMA-CIQ");
- S("Interface:",
- rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- RL("RxAN:", stats.an);
- RL("RxNoMem:", stats.nomem);
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
@@ -2672,10 +2564,7 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
- DIV_ROUND_UP(adap->sge.niscsitq, 4) +
- DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
- DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
+ DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -2859,12 +2748,6 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
size_mb << 20);
}
-static int blocked_fl_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -2908,7 +2791,7 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
static const struct file_operations blocked_fl_fops = {
.owner = THIS_MODULE,
- .open = blocked_fl_open,
+ .open = simple_open,
.read = blocked_fl_read,
.write = blocked_fl_write,
.llseek = generic_file_llseek,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
new file mode 100644
index 000000000000..10736738ff30
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -0,0 +1,721 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "l2t.h"
+#include "t4fw_api.h"
+#include "cxgb4_filter.h"
+
+static inline bool is_field_set(u32 val, u32 mask)
+{
+ return val || mask;
+}
+
+static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
+{
+ return !(conf & conf_mask) && is_field_set(val, mask);
+}
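+
+/* For example, if the card's filter configuration (fconf) lacks FCOE_F,
+ * any request that supplies an FCoE value or mask is flagged as
+ * unsupported.
+ */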
+
+/* Validate filter spec against configuration done on the card. */
+static int validate_filter(struct net_device *dev,
+ struct ch_filter_specification *fs)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ u32 fconf, iconf;
+
+ /* Check for unconfigured fields being used. */
+ fconf = adapter->params.tp.vlan_pri_map;
+ iconf = adapter->params.tp.ingress_config;
+
+ if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
+ unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
+ unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
+ unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
+ fs->mask.ethtype) ||
+ unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
+ unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
+ fs->mask.matchtype) ||
+ unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
+ unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
+ fs->mask.pfvf_vld) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
+ fs->mask.ovlan_vld) ||
+ unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
+ return -EOPNOTSUPP;
+
+ /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
+ * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
+ * in TP_INGRESS_CONFIG. Hence the somewhat crazy checks
+ * below. Additionally, since the T4 firmware interface also
+ * carries that overlap, we need to translate any PF/VF
+ * specification into that internal format below.
+ */
+ if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+ is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
+ return -EOPNOTSUPP;
+ if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
+ (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
+ (iconf & VNIC_F)))
+ return -EOPNOTSUPP;
+ if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
+ return -ERANGE;
+ fs->mask.pf &= 0x7;
+ fs->mask.vf &= 0x7f;
+
+ /* If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /* Don't allow various trivially obvious bogus out-of-range values... */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+ /* T4 doesn't support removing VLAN Tags for loopback filters. */
+ if (is_t4(adapter->params.chip) &&
+ fs->action == FILTER_SWITCH &&
+ (fs->newvlan == VLAN_REMOVE ||
+ fs->newvlan == VLAN_REWRITE))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int get_filter_steerq(struct net_device *dev,
+ struct ch_filter_specification *fs)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ int iq;
+
+ /* If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ if (fs->iq)
+ return -EINVAL;
+ iq = 0;
+ } else {
+ struct port_info *pi = netdev_priv(dev);
+
+ /* If the iq id is greater than the number of qsets,
+ * then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->nqsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
+
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (test_bit(fidx, t->ftid_bmap)) {
+ spin_unlock_bh(&t->ftid_lock);
+ return -EBUSY;
+ }
+
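+ /* An IPv4 filter occupies a single slot; an IPv6 filter spans four
+ * consecutive slots, so reserve an order-2 (four-slot) region for it.
+ */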
+ if (family == PF_INET)
+ __set_bit(fidx, t->ftid_bmap);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+
+ spin_unlock_bh(&t->ftid_lock);
+ return 0;
+}
+
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+ if (family == PF_INET)
+ __clear_bit(fidx, t->ftid_bmap);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ spin_unlock_bh(&t->ftid_lock);
+}
+
+/* Delete the filter at a specified index. */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct fw_filter_wr *fwr;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ len = sizeof(*fwr);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
+
+/* Send a Work Request to write the filter at a specified index. We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode which will prevent any further actions against
+ * it till we get a reply from the firmware on the completion status of the
+ * request.
+ */
+int set_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct fw_filter_wr *fwr;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+ if (!f->l2t) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ }
+
+ fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+ memset(fwr, 0, sizeof(*fwr));
+
+ /* It would be nice to put most of the following in t4_hw.c but most
+ * of the work is translating the cxgbtool ch_filter_specification
+ * into the Work Request and the definition of that structure is
+ * currently in cxgbtool.h which isn't appropriate to pull into the
+ * common code. We may eventually try to come up with a more neutral
+ * filter specification structure but for now it's easiest to simply
+ * put this fairly direct code in line ...
+ */
+ fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ htonl(FW_FILTER_WR_TID_V(f->tid) |
+ FW_FILTER_WR_RQTYPE_V(f->fs.type) |
+ FW_FILTER_WR_NOREPLY_V(0) |
+ FW_FILTER_WR_IQ_V(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
+ FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
+ FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
+ FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
+ FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+ FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+ FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+ FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
+ FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
+ FW_FILTER_WR_PRIO_V(f->fs.prio) |
+ FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = htons(f->fs.val.ethtype);
+ fwr->ethtypem = htons(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
+ FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
+ FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+ fwr->maci_to_matchtypem =
+ htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
+ FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
+ FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
+ FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
+ FW_FILTER_WR_PORT_V(f->fs.val.iport) |
+ FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
+ FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
+ FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = htons(f->fs.val.ivlan);
+ fwr->ivlanm = htons(f->fs.mask.ivlan);
+ fwr->ovlan = htons(f->fs.val.ovlan);
+ fwr->ovlanm = htons(f->fs.mask.ovlan);
+ memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = htons(f->fs.val.lport);
+ fwr->lpm = htons(f->fs.mask.lport);
+ fwr->fp = htons(f->fs.val.fport);
+ fwr->fpm = htons(f->fs.mask.fport);
+ if (f->fs.newsmac)
+ memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Delete the filter at the specified index (if valid). This checks for all
+ * the common problems with doing this, like the filter being locked or
+ * currently pending in another operation.
+ */
+int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+ struct filter_entry *f;
+ int ret;
+
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ return -EINVAL;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+ if (f->valid)
+ return del_filter_wr(adapter, fidx);
+
+ return 0;
+}
+
+/* Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+ /* If the new or old filter has loopback rewriting rules then we'll
+ * need to free any existing Layer Two Table (L2T) entries of the old
+ * filter rule. The firmware will handle freeing up any Source MAC
+ * Table (SMT) entries used for rewriting Source MAC Addresses in
+ * loopback rules.
+ */
+ if (f->l2t)
+ cxgb4_l2t_release(f->l2t);
+
+ /* The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags, l2t pointer, etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+void clear_all_filters(struct adapter *adapter)
+{
+ unsigned int i;
+
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ unsigned int max_ftid = adapter->tids.nftids +
+ adapter->tids.nsftids;
+
+ for (i = 0; i < max_ftid; i++, f++)
+ if (f->valid || f->pending)
+ clear_filter(adapter, f);
+ }
+}
+
+/* Fill up default masks for set match fields: a field that has a value but
+ * a zero mask is widened to an exact match, e.g. val.lport = 80 with
+ * mask.lport = 0 becomes an exact match on local port 80.
+ */
+static void fill_default_mask(struct ch_filter_specification *fs)
+{
+ unsigned int lip = 0, lip_mask = 0;
+ unsigned int fip = 0, fip_mask = 0;
+ unsigned int i;
+
+ if (fs->val.iport && !fs->mask.iport)
+ fs->mask.iport |= ~0;
+ if (fs->val.fcoe && !fs->mask.fcoe)
+ fs->mask.fcoe |= ~0;
+ if (fs->val.matchtype && !fs->mask.matchtype)
+ fs->mask.matchtype |= ~0;
+ if (fs->val.macidx && !fs->mask.macidx)
+ fs->mask.macidx |= ~0;
+ if (fs->val.ethtype && !fs->mask.ethtype)
+ fs->mask.ethtype |= ~0;
+ if (fs->val.ivlan && !fs->mask.ivlan)
+ fs->mask.ivlan |= ~0;
+ if (fs->val.ovlan && !fs->mask.ovlan)
+ fs->mask.ovlan |= ~0;
+ if (fs->val.frag && !fs->mask.frag)
+ fs->mask.frag |= ~0;
+ if (fs->val.tos && !fs->mask.tos)
+ fs->mask.tos |= ~0;
+ if (fs->val.proto && !fs->mask.proto)
+ fs->mask.proto |= ~0;
+
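+ /* IP addresses and masks are byte arrays; OR the bytes together to
+ * see whether any value or mask byte is set at all.
+ */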
+ for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
+ lip |= fs->val.lip[i];
+ lip_mask |= fs->mask.lip[i];
+ fip |= fs->val.fip[i];
+ fip_mask |= fs->mask.fip[i];
+ }
+
+ if (lip && !lip_mask)
+ memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
+
+ if (fip && !fip_mask)
+ memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));
+
+ if (fs->val.lport && !fs->mask.lport)
+ fs->mask.lport = ~0;
+ if (fs->val.fport && !fs->mask.fport)
+ fs->mask.fport = ~0;
+}
+
+/* Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int max_fidx, fidx;
+ struct filter_entry *f;
+ u32 iconf;
+ int iq, ret;
+
+ max_fidx = adapter->tids.nftids;
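+ /* Only indices inside the normal filter region are accepted, plus
+ * the very last server-filter slot.
+ */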
+ if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
+ filter_id >= max_fidx)
+ return -E2BIG;
+
+ fill_default_mask(fs);
+
+ ret = validate_filter(dev, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+ if (iq < 0)
+ return iq;
+
+ /* IPv6 filters occupy four slots and must be aligned on
+ * four-slot boundaries. IPv4 filters only occupy a single
+ * slot and have no alignment requirements, but writing a new
+ * IPv4 filter into the middle of an existing IPv6 filter
+ * would require clearing the old IPv6 filter first, so we
+ * prevent the insertion.
+ */
+ if (fs->type == 0) { /* IPv4 */
+ /* If our IPv4 filter isn't being written at an index that
+ * is a multiple of four and an IPv6 filter occupies the
+ * corresponding 4-slot base, we prevent the insertion.
+ */
+ fidx = filter_id & ~0x3;
+ if (fidx != filter_id &&
+ adapter->tids.ftid_tab[fidx].fs.type) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
+ fidx, fidx + 3);
+ return -EINVAL;
+ }
+ }
+ } else { /* IPv6 */
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
+
+ /* Check all except the base overlapping IPv4 filter slots. */
+ for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Check to make sure that provided filter index is not
+ * already in use by someone else
+ */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = filter_id + adapter->tids.ftid_base;
+ ret = cxgb4_set_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ if (ret)
+ return ret;
+
+ /* Check to make sure the filter requested is writable ... */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ return ret;
+ }
+
+ /* Clear out any old resources being used by the filter before
+ * we start constructing the new filter.
+ */
+ if (f->valid)
+ clear_filter(adapter, f);
+
+ /* Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
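+ /* ovlan bits 15:13 carry the PF and bits 6:0 the VF; both were
+ * range-checked in validate_filter() above.
+ */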
+ iconf = adapter->params.tp.ingress_config;
+ if (iconf & VNIC_F) {
+ f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
+ f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
+ f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+ f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ }
+
+ /* Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(adapter, filter_id);
+ if (ret) {
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ clear_filter(adapter, f);
+ }
+
+ return ret;
+}
+
+/* Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ struct filter_entry *f;
+ unsigned int max_fidx;
+ int ret;
+
+ max_fidx = adapter->tids.nftids;
+ if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
+ filter_id >= max_fidx)
+ return -E2BIG;
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ f->fs.type ? PF_INET6 : PF_INET);
+ return del_filter_wr(adapter, filter_id);
+ }
+
+ /* If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ complete(&ctx->completion);
+ }
+ return ret;
+}
+
+int cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs)
+{
+ struct filter_ctx ctx;
+ int ret;
+
+ init_completion(&ctx.completion);
+
+ ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
+ if (ret)
+ goto out;
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = ctx.result;
+out:
+ return ret;
+}
+
+int cxgb4_del_filter(struct net_device *dev, int filter_id)
+{
+ struct filter_ctx ctx;
+ int ret;
+
+ init_completion(&ctx.completion);
+
+ ret = __cxgb4_del_filter(dev, filter_id, &ctx);
+ if (ret)
+ goto out;
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = ctx.result;
+out:
+ return ret;
+}
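+
+/* Minimal usage sketch (illustrative values only; "dev" is the adapter's
+ * net_device): steer TCP packets with local port 80, via filter index 0,
+ * to the interface's first ingress queue and wait for the firmware reply:
+ *
+ * struct ch_filter_specification fs = { 0 };
+ *
+ * fs.val.proto = IPPROTO_TCP;
+ * fs.mask.proto = ~0;
+ * fs.val.lport = 80;
+ * fs.mask.lport = ~0;
+ * fs.dirsteer = 1;
+ * fs.iq = 0;
+ * err = cxgb4_set_filter(dev, 0, &fs);
+ */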
+
+/* Handle a filter write/deletion reply. */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ unsigned int tid = GET_TID(rpl);
+ struct filter_entry *f = NULL;
+ unsigned int max_fidx;
+ int idx;
+
+ max_fidx = adap->tids.nftids + adap->tids.nsftids;
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx < 0 || idx >= max_fidx)
+ return;
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = TCB_COOKIE_G(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /* Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /* Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = 0;
+ } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+ dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+ idx);
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = -ENOMEM;
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
+ }
+ } else {
+ /* Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+ if (ctx)
+ complete(&ctx->completion);
+ }
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
new file mode 100644
index 000000000000..23742cb1c69f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -0,0 +1,48 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_FILTER_H
+#define __CXGB4_FILTER_H
+
+#include "t4_msg.h"
+
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct adapter *adap, struct filter_entry *f);
+
+int set_filter_wr(struct adapter *adapter, int fidx);
+int delete_filter(struct adapter *adapter, unsigned int fidx);
+
+int writable_filter(struct filter_entry *f);
+void clear_all_filters(struct adapter *adapter);
+#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 44cc9767936f..cf147ca419a8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -67,6 +67,7 @@
#include <linux/crash_dump.h>
#include "cxgb4.h"
+#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
@@ -77,6 +78,7 @@
#include "clip_tbl.h"
#include "l2t.h"
#include "sched.h"
+#include "cxgb4_tc_u32.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;
@@ -87,30 +89,6 @@ char cxgb4_driver_name[] = KBUILD_MODNAME;
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
-/* Host shadow copy of ingress filter entry. This is in host native format
- * and doesn't match the ordering or bit order, etc. of the hardware of the
- * firmware command. The use of bit-field structure elements is purely to
- * remind ourselves of the field size limitations and save memory in the case
- * where the filter table is large.
- */
-struct filter_entry {
- /* Administrative fields for filter.
- */
- u32 valid:1; /* filter allocated and valid */
- u32 locked:1; /* filter is administratively locked */
-
- u32 pending:1; /* filter action is pending firmware reply */
- u32 smtidx:8; /* Source MAC Table index for smac */
- struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
-
- /* The filter itself. Most of this is a straight copy of information
- * provided by the extended ioctl(). Some fields are translated to
- * internal forms -- for instance the Ingress Queue ID passed in from
- * the ioctl() is translated into the Absolute Ingress Queue ID.
- */
- struct ch_filter_specification fs;
-};
-
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -226,11 +204,6 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
-/* Adapter list to be accessed from atomic context */
-static LIST_HEAD(adap_rcu_list);
-static DEFINE_SPINLOCK(adap_rcu_lock);
-static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
-static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
static void link_report(struct net_device *dev)
{
@@ -304,11 +277,9 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
txq->dcb_prio = value;
}
}
-#endif /* CONFIG_CHELSIO_T4_DCB */
-int cxgb4_dcb_enabled(const struct net_device *dev)
+static int cxgb4_dcb_enabled(const struct net_device *dev)
{
-#ifdef CONFIG_CHELSIO_T4_DCB
struct port_info *pi = netdev_priv(dev);
if (!pi->dcb.enabled)
@@ -316,11 +287,8 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
(pi->dcb.state == CXGB4_DCB_STATE_HOST));
-#else
- return 0;
-#endif
}
-EXPORT_SYMBOL(cxgb4_dcb_enabled);
+#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
@@ -532,66 +500,6 @@ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
}
#endif /* CONFIG_CHELSIO_T4_DCB */
-/* Clear a filter and release any of its resources that we own. This also
- * clears the filter's "pending" status.
- */
-static void clear_filter(struct adapter *adap, struct filter_entry *f)
-{
- /* If the new or old filter have loopback rewriteing rules then we'll
- * need to free any existing Layer Two Table (L2T) entries of the old
- * filter rule. The firmware will handle freeing up any Source MAC
- * Table (SMT) entries used for rewriting Source MAC Addresses in
- * loopback rules.
- */
- if (f->l2t)
- cxgb4_l2t_release(f->l2t);
-
- /* The zeroing of the filter rule below clears the filter valid,
- * pending, locked flags, l2t pointer, etc. so it's all we need for
- * this operation.
- */
- memset(f, 0, sizeof(*f));
-}
-
-/* Handle a filter write/deletion reply.
- */
-static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
-{
- unsigned int idx = GET_TID(rpl);
- unsigned int nidx = idx - adap->tids.ftid_base;
- unsigned int ret;
- struct filter_entry *f;
-
- if (idx >= adap->tids.ftid_base && nidx <
- (adap->tids.nftids + adap->tids.nsftids)) {
- idx = nidx;
- ret = TCB_COOKIE_G(rpl->cookie);
- f = &adap->tids.ftid_tab[idx];
-
- if (ret == FW_FILTER_WR_FLT_DELETED) {
- /* Clear the filter when we get confirmation from the
- * hardware that the filter has been deleted.
- */
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
- dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
- idx);
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_FLT_ADDED) {
- f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
- f->pending = 0; /* asynchronous setup completed */
- f->valid = 1;
- } else {
- /* Something went wrong. Issue a warning about the
- * problem and clear everything out.
- */
- dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
- idx, ret);
- clear_filter(adap, f);
- }
- }
-}
-
/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
@@ -678,56 +586,6 @@ out:
return 0;
}
-/* Flush the aggregated lro sessions */
-static void uldrx_flush_handler(struct sge_rspq *q)
-{
- if (ulds[q->uld].lro_flush)
- ulds[q->uld].lro_flush(&q->lro_mgr);
-}
-
-/**
- * uldrx_handler - response queue handler for ULD queues
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the offload message
- * @gl: the gather list of packet fragments
- *
- * Deliver an ingress offload packet to a ULD. All processing is done by
- * the ULD, we just maintain statistics.
- */
-static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
- const struct pkt_gl *gl)
-{
- struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
- int ret;
-
- /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
- */
- if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
- ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
- rsp += 2;
-
- if (q->flush_handler)
- ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
- rsp, gl, &q->lro_mgr,
- &q->napi);
- else
- ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
- rsp, gl);
-
- if (ret) {
- rxq->stats.nomem++;
- return -1;
- }
-
- if (gl == NULL)
- rxq->stats.imm++;
- else if (gl == CXGB4_MSG_AN)
- rxq->stats.an++;
- else
- rxq->stats.pkts++;
- return 0;
-}
-
static void disable_msi(struct adapter *adapter)
{
if (adapter->flags & USING_MSIX) {
@@ -779,30 +637,12 @@ static void name_msix_vecs(struct adapter *adap)
snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
d->name, i);
}
-
- /* offload queues */
- for_each_iscsirxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
- adap->port[0]->name, i);
-
- for_each_iscsitrxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
- adap->port[0]->name, i);
-
- for_each_rdmarxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
- adap->port[0]->name, i);
-
- for_each_rdmaciq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
- adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
- int iscsitqidx = 0;
+ int err, ethqidx;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -819,57 +659,9 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
- for_each_iscsirxq(s, iscsiqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->iscsirxq[iscsiqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_iscsitrxq(s, iscsitqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->iscsitrxq[iscsitqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmarxq(s, rdmaqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmarxq[rdmaqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmaciq(s, rdmaciqqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmaciq[rdmaciqqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
return 0;
unwind:
- while (--rdmaciqqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmaciq[rdmaciqqidx].rspq);
- while (--rdmaqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmarxq[rdmaqidx].rspq);
- while (--iscsitqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->iscsitrxq[iscsitqidx].rspq);
- while (--iscsiqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->iscsirxq[iscsiqidx].rspq);
while (--ethqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ethrxq[ethqidx].rspq);
@@ -885,16 +677,6 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
- for_each_iscsirxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec,
- &s->iscsirxq[i].rspq);
- for_each_iscsitrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec,
- &s->iscsitrxq[i].rspq);
- for_each_rdmarxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
- for_each_rdmaciq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
@@ -1033,42 +815,11 @@ static void enable_rx(struct adapter *adap)
}
}
-static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
- unsigned int nq, unsigned int per_chan, int msi_idx,
- u16 *ids, bool lro)
-{
- int i, err;
- for (i = 0; i < nq; i++, q++) {
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler,
- lro ? uldrx_flush_handler : NULL,
- 0);
- if (err)
- return err;
- memset(&q->stats, 0, sizeof(q->stats));
- if (ids)
- ids[i] = q->rspq.abs_id;
- }
- return 0;
-}
-
-/**
- * setup_sge_queues - configure SGE Tx/Rx/response queues
- * @adap: the adapter
- *
- * Determines how many sets of SGE queues to use and initializes them.
- * We support multiple queue sets per port if we have MSI-X, otherwise
- * just one queue set per port.
- */
-static int setup_sge_queues(struct adapter *adap)
+static int setup_fw_sge_queues(struct adapter *adap)
{
- int err, i, j;
struct sge *s = &adap->sge;
+ int err = 0;
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
@@ -1083,25 +834,27 @@ static int setup_sge_queues(struct adapter *adap)
adap->msi_idx = -((int)s->intrq.abs_id + 1);
}
- /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
- * don't forget to update the following which need to be
- * synchronized to and changes here.
- *
- * 1. The calculations of MAX_INGQ in cxgb4.h.
- *
- * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
- * to accommodate any new/deleted Ingress Queues
- * which need MSI-X Vectors.
- *
- * 3. Update sge_qinfo_show() to include information on the
- * new/deleted queues.
- */
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
- if (err) {
-freeout: t4_free_sge_resources(adap);
- return err;
- }
+ if (err)
+ t4_free_sge_resources(adap);
+ return err;
+}
+
+/**
+ * setup_sge_queues - configure SGE Tx/Rx/response queues
+ * @adap: the adapter
+ *
+ * Determines how many sets of SGE queues to use and initializes them.
+ * We support multiple queue sets per port if we have MSI-X, otherwise
+ * just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adap)
+{
+ int err, i, j;
+ struct sge *s = &adap->sge;
+ struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ unsigned int cmplqid = 0;
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
@@ -1132,8 +885,8 @@ freeout: t4_free_sge_resources(adap);
}
}
- j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
- for_each_iscsirxq(s, i) {
+ j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
+ for_each_ofldtxq(s, i) {
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
adap->port[i / j],
s->fw_evtq.cntxt_id);
@@ -1141,30 +894,15 @@ freeout: t4_free_sge_resources(adap);
goto freeout;
}
-#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
- err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, adap->msi_idx, ids, lro); \
- if (err) \
- goto freeout; \
- if (adap->msi_idx > 0) \
- adap->msi_idx += nq; \
-} while (0)
-
- ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
- ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
- ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
- j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
- ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
-
-#undef ALLOC_OFLD_RXQS
-
for_each_port(adap, i) {
- /*
- * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+ /* Note that cmplqid below is 0 if we don't
* have RDMA queues, and that's the right value.
*/
+ if (rxq_info)
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+
err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
- s->fw_evtq.cntxt_id,
- s->rdmarxq[i].rspq.cntxt_id);
+ s->fw_evtq.cntxt_id, cmplqid);
if (err)
goto freeout;
}
@@ -1175,6 +913,9 @@ freeout: t4_free_sge_resources(adap);
RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
+freeout:
+ t4_free_sge_resources(adap);
+ return err;
}
/*
@@ -1198,151 +939,6 @@ void t4_free_mem(void *addr)
kvfree(addr);
}
-/* Send a Work Request to write the filter at a specified index. We construct
- * a Firmware Filter Work Request to have the work done and put the indicated
- * filter into "pending" mode which will prevent any further actions against
- * it till we get a reply from the firmware on the completion status of the
- * request.
- */
-static int set_filter_wr(struct adapter *adapter, int fidx)
-{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int ftid;
-
- skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- /* If the new filter requires loopback Destination MAC and/or VLAN
- * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
- * the filter.
- */
- if (f->fs.newdmac || f->fs.newvlan) {
- /* allocate L2T entry for new filter */
- f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
- f->fs.eport, f->fs.dmac);
- if (f->l2t == NULL) {
- kfree_skb(skb);
- return -ENOMEM;
- }
- }
-
- ftid = adapter->tids.ftid_base + fidx;
-
- fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
- memset(fwr, 0, sizeof(*fwr));
-
- /* It would be nice to put most of the following in t4_hw.c but most
- * of the work is translating the cxgbtool ch_filter_specification
- * into the Work Request and the definition of that structure is
- * currently in cxgbtool.h which isn't appropriate to pull into the
- * common code. We may eventually try to come up with a more neutral
- * filter specification structure but for now it's easiest to simply
- * put this fairly direct code in line ...
- */
- fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
- fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
- fwr->tid_to_iq =
- htonl(FW_FILTER_WR_TID_V(ftid) |
- FW_FILTER_WR_RQTYPE_V(f->fs.type) |
- FW_FILTER_WR_NOREPLY_V(0) |
- FW_FILTER_WR_IQ_V(f->fs.iq));
- fwr->del_filter_to_l2tix =
- htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
- FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
- FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
- FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
- FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
- FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
- FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
- FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
- FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
- FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
- FW_FILTER_WR_PRIO_V(f->fs.prio) |
- FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
- fwr->ethtype = htons(f->fs.val.ethtype);
- fwr->ethtypem = htons(f->fs.mask.ethtype);
- fwr->frag_to_ovlan_vldm =
- (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
- FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
- FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
- FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
- fwr->rx_chan_rx_rpl_iq =
- htons(FW_FILTER_WR_RX_CHAN_V(0) |
- FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
- fwr->maci_to_matchtypem =
- htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
- FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
- FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
- FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
- FW_FILTER_WR_PORT_V(f->fs.val.iport) |
- FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
- FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
- FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
- fwr->ptcl = f->fs.val.proto;
- fwr->ptclm = f->fs.mask.proto;
- fwr->ttyp = f->fs.val.tos;
- fwr->ttypm = f->fs.mask.tos;
- fwr->ivlan = htons(f->fs.val.ivlan);
- fwr->ivlanm = htons(f->fs.mask.ivlan);
- fwr->ovlan = htons(f->fs.val.ovlan);
- fwr->ovlanm = htons(f->fs.mask.ovlan);
- memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
- memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
- memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
- memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
- fwr->lp = htons(f->fs.val.lport);
- fwr->lpm = htons(f->fs.mask.lport);
- fwr->fp = htons(f->fs.val.fport);
- fwr->fpm = htons(f->fs.mask.fport);
- if (f->fs.newsmac)
- memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
-
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
- t4_ofld_send(adapter, skb);
- return 0;
-}
-
-/* Delete the filter at a specified index.
- */
-static int del_filter_wr(struct adapter *adapter, int fidx)
-{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int len, ftid;
-
- len = sizeof(*fwr);
- ftid = adapter->tids.ftid_base + fidx;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- fwr = (struct fw_filter_wr *)__skb_put(skb, len);
- t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
-
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- t4_mgmt_tx(adapter, skb);
- return 0;
-}
-
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -1724,19 +1320,22 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
*/
static int tid_init(struct tid_info *t)
{
- size_t size;
- unsigned int stid_bmap_size;
- unsigned int natids = t->natids;
struct adapter *adap = container_of(t, struct adapter, tids);
+ unsigned int max_ftids = t->nftids + t->nsftids;
+ unsigned int natids = t->natids;
+ unsigned int stid_bmap_size;
+ unsigned int ftid_bmap_size;
+ size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+ ftid_bmap_size = BITS_TO_LONGS(t->nftids);
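+ /* All TID tables and bitmaps are carved out of a single allocation;
+ * the pointer setup below must match the order the sizes are summed
+ * here.
+ */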
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
- t->nftids * sizeof(*t->ftid_tab) +
- t->nsftids * sizeof(*t->ftid_tab);
+ max_ftids * sizeof(*t->ftid_tab) +
+ ftid_bmap_size * sizeof(long);
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
@@ -1746,8 +1345,10 @@ static int tid_init(struct tid_info *t)
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
+ spin_lock_init(&t->ftid_lock);
t->stids_in_use = 0;
t->sftids_in_use = 0;
@@ -1762,12 +1363,16 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
- bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
- /* Reserve stid 0 for T4/T5 adapters */
- if (!t->stid_base &&
- (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
- __set_bit(0, t->stid_bmap);
+ if (is_offload(adap)) {
+ bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+ /* Reserve stid 0 for T4/T5 adapters */
+ if (!t->stid_base &&
+ CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ __set_bit(0, t->stid_bmap);
+ }
+
+ bitmap_zero(t->ftid_bmap, t->nftids);
return 0;
}
@@ -2317,7 +1922,7 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
disable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
@@ -2329,7 +1934,7 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
@@ -2337,9 +1942,10 @@ static void enable_dbs(struct adapter *adap)
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
- if (adap->uld_handle[CXGB4_ULD_RDMA])
- ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
- cmd);
+ enum cxgb4_uld type = CXGB4_ULD_RDMA;
+
+ if (adap->uld && adap->uld[type].handle)
+ adap->uld[type].control(adap->uld[type].handle, cmd);
}
static void process_db_full(struct work_struct *work)
@@ -2393,13 +1999,14 @@ out:
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
+
static void recover_all_queues(struct adapter *adap)
{
int i;
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
@@ -2464,94 +2071,12 @@ void t4_db_dropped(struct adapter *adap)
queue_work(adap->workq, &adap->db_drop_task);
}
-static void uld_attach(struct adapter *adap, unsigned int uld)
-{
- void *handle;
- struct cxgb4_lld_info lli;
- unsigned short i;
-
- lli.pdev = adap->pdev;
- lli.pf = adap->pf;
- lli.l2t = adap->l2t;
- lli.tids = &adap->tids;
- lli.ports = adap->port;
- lli.vr = &adap->vres;
- lli.mtus = adap->params.mtus;
- if (uld == CXGB4_ULD_RDMA) {
- lli.rxq_ids = adap->sge.rdma_rxq;
- lli.ciq_ids = adap->sge.rdma_ciq;
- lli.nrxq = adap->sge.rdmaqs;
- lli.nciq = adap->sge.rdmaciqs;
- } else if (uld == CXGB4_ULD_ISCSI) {
- lli.rxq_ids = adap->sge.iscsi_rxq;
- lli.nrxq = adap->sge.iscsiqsets;
- } else if (uld == CXGB4_ULD_ISCSIT) {
- lli.rxq_ids = adap->sge.iscsit_rxq;
- lli.nrxq = adap->sge.niscsitq;
- }
- lli.ntxq = adap->sge.iscsiqsets;
- lli.nchan = adap->params.nports;
- lli.nports = adap->params.nports;
- lli.wr_cred = adap->params.ofldq_wr_cred;
- lli.adapter_type = adap->params.chip;
- lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
- lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
- lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
- lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
- lli.iscsi_ppm = &adap->iscsi_ppm;
- lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
- lli.udb_density = 1 << adap->params.sge.eq_qpp;
- lli.ucq_density = 1 << adap->params.sge.iq_qpp;
- lli.filt_mode = adap->params.tp.vlan_pri_map;
- /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
- for (i = 0; i < NCHAN; i++)
- lli.tx_modq[i] = i;
- lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
- lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
- lli.fw_vers = adap->params.fw_vers;
- lli.dbfifo_int_thresh = dbfifo_int_thresh;
- lli.sge_ingpadboundary = adap->sge.fl_align;
- lli.sge_egrstatuspagesize = adap->sge.stat_len;
- lli.sge_pktshift = adap->sge.pktshift;
- lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
- lli.max_ordird_qp = adap->params.max_ordird_qp;
- lli.max_ird_adapter = adap->params.max_ird_adapter;
- lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
- lli.nodeid = dev_to_node(adap->pdev_dev);
-
- handle = ulds[uld].add(&lli);
- if (IS_ERR(handle)) {
- dev_warn(adap->pdev_dev,
- "could not attach to the %s driver, error %ld\n",
- uld_str[uld], PTR_ERR(handle));
- return;
- }
-
- adap->uld_handle[uld] = handle;
-
+void t4_register_netevent_notifier(void)
+{
if (!netevent_registered) {
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
-
- if (adap->flags & FULL_INIT_DONE)
- ulds[uld].state_change(handle, CXGB4_STATE_UP);
-}
-
-static void attach_ulds(struct adapter *adap)
-{
- unsigned int i;
-
- spin_lock(&adap_rcu_lock);
- list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
- spin_unlock(&adap_rcu_lock);
-
- mutex_lock(&uld_mutex);
- list_add_tail(&adap->list_node, &adapter_list);
- for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (ulds[i].add)
- uld_attach(adap, i);
- mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
@@ -2561,12 +2086,6 @@ static void detach_ulds(struct adapter *adap)
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i]) {
- ulds[i].state_change(adap->uld_handle[i],
- CXGB4_STATE_DETACH);
- adap->uld_handle[i] = NULL;
- }
- for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
if (adap->uld && adap->uld[i].handle) {
adap->uld[i].state_change(adap->uld[i].handle,
CXGB4_STATE_DETACH);
@@ -2577,10 +2096,6 @@ static void detach_ulds(struct adapter *adap)
netevent_registered = false;
}
mutex_unlock(&uld_mutex);
-
- spin_lock(&adap_rcu_lock);
- list_del_rcu(&adap->rcu_node);
- spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
@@ -2589,65 +2104,12 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i])
- ulds[i].state_change(adap->uld_handle[i], new_state);
- for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
if (adap->uld && adap->uld[i].handle)
adap->uld[i].state_change(adap->uld[i].handle,
new_state);
mutex_unlock(&uld_mutex);
}
-/**
- * cxgb4_register_uld - register an upper-layer driver
- * @type: the ULD type
- * @p: the ULD methods
- *
- * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
- */
-int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
-{
- int ret = 0;
- struct adapter *adap;
-
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- if (ulds[type].add) {
- ret = -EBUSY;
- goto out;
- }
- ulds[type] = *p;
- list_for_each_entry(adap, &adapter_list, list_node)
- uld_attach(adap, type);
-out: mutex_unlock(&uld_mutex);
- return ret;
-}
-EXPORT_SYMBOL(cxgb4_register_uld);
-
-/**
- * cxgb4_unregister_uld - unregister an upper-layer driver
- * @type: the ULD type
- *
- * Unregisters an existing upper-layer driver.
- */
-int cxgb4_unregister_uld(enum cxgb4_uld type)
-{
- struct adapter *adap;
-
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- list_for_each_entry(adap, &adapter_list, list_node)
- adap->uld_handle[type] = NULL;
- ulds[type].add = NULL;
- mutex_unlock(&uld_mutex);
- return 0;
-}
-EXPORT_SYMBOL(cxgb4_unregister_uld);
-
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
unsigned long event, void *data)
@@ -2752,7 +2214,6 @@ static int cxgb_up(struct adapter *adap)
adap->msix_info[0].desc, adap);
if (err)
goto irq_err;
-
err = request_msix_queue_irqs(adap);
if (err) {
free_irq(adap->msix_info[0].vec, adap);
@@ -2830,40 +2291,6 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
-/* Return an error number if the indicated filter isn't writable ...
- */
-static int writable_filter(struct filter_entry *f)
-{
- if (f->locked)
- return -EPERM;
- if (f->pending)
- return -EBUSY;
-
- return 0;
-}
-
-/* Delete the filter at the specified index (if valid). The checks for all
- * the common problems with doing this like the filter being locked, currently
- * pending in another operation, etc.
- */
-static int delete_filter(struct adapter *adapter, unsigned int fidx)
-{
- struct filter_entry *f;
- int ret;
-
- if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
- return -EINVAL;
-
- f = &adapter->tids.ftid_tab[fidx];
- ret = writable_filter(f);
- if (ret)
- return ret;
- if (f->valid)
- return del_filter_wr(adapter, fidx);
-
- return 0;
-}
-
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue, unsigned char port, unsigned char mask)
@@ -3280,6 +2707,35 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
+static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to setup tc on port %d. Link Down?\n",
+ pi->port_id);
+ return -EINVAL;
+ }
+
+ if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
+ tc->type == TC_SETUP_CLSU32) {
+ switch (tc->cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return cxgb4_config_knode(dev, proto, tc->cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return cxgb4_delete_knode(dev, proto, tc->cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
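
For context: a classifier is only offloaded when it hangs off the ingress qdisc. A minimal sketch of the handle arithmetic behind the guard above, using the uapi constants (TC_H_INGRESS is 0xFFFFFFF1 and TC_H_MAJ() keeps the upper 16 bits); this helper is illustrative only, not part of the patch:

#include <linux/types.h>
#include <linux/pkt_sched.h>

/* The ingress qdisc handle is "ffff:", i.e. 0xFFFF0000, so it satisfies
 * TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) in cxgb_setup_tc() above.
 */
static bool attached_to_ingress(u32 handle)
{
	return TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
}
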
+
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -3303,6 +2759,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_busy_poll = cxgb_busy_poll,
#endif
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
+ .ndo_setup_tc = cxgb_setup_tc,
};
#ifdef CONFIG_PCI_IOV
@@ -4262,6 +3719,7 @@ static int adap_init0(struct adapter *adap)
adap->params.ofldq_wr_cred = val[5];
adap->params.offload = 1;
+ adap->num_ofld_uld += 1;
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
@@ -4314,6 +3772,7 @@ static int adap_init0(struct adapter *adap)
"max_ordird_qp %d max_ird_adapter %d\n",
adap->params.max_ordird_qp,
adap->params.max_ird_adapter);
+ adap->num_ofld_uld += 2;
}
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -4324,6 +3783,8 @@ static int adap_init0(struct adapter *adap)
goto bye;
adap->vres.iscsi.start = val[0];
adap->vres.iscsi.size = val[1] - val[0] + 1;
+ /* LIO target and cxgb4i initiator */
+ adap->num_ofld_uld += 2;
}
if (caps_cmd.cryptocaps) {
/* Should query params here...TODO */
@@ -4505,10 +3966,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
/*
@@ -4523,14 +3991,14 @@ static void cfg_queues(struct adapter *adap)
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
- int ciq_size;
/* Reduce memory usage in kdump environment, disable all offload.
*/
if (is_kdump_kernel()) {
adap->params.offload = 0;
adap->params.crypto = 0;
- } else if (adap->num_uld && uld_mem_alloc(adap)) {
+ } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
+ adap->params.offload = 0;
adap->params.crypto = 0;
}
@@ -4576,33 +4044,18 @@ static void cfg_queues(struct adapter *adap)
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
- if (is_offload(adap)) {
+ if (is_uld(adap)) {
/*
* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
if (n10g) {
- i = min_t(int, ARRAY_SIZE(s->iscsirxq),
- num_online_cpus());
- s->iscsiqsets = roundup(i, adap->params.nports);
- } else
- s->iscsiqsets = adap->params.nports;
- /* For RDMA one Rx queue per channel suffices */
- s->rdmaqs = adap->params.nports;
- /* Try and allow at least 1 CIQ per cpu rounding down
- * to the number of ports, with a minimum of 1 per port.
- * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
- * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
- * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
- */
- s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
- s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
- adap->params.nports;
- s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
-
- if (!is_t4(adap->params.chip))
- s->niscsitq = s->iscsiqsets;
+ i = num_online_cpus();
+ s->ofldqsets = roundup(i, adap->params.nports);
+ } else {
+ s->ofldqsets = adap->params.nports;
+ }
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4621,47 +4074,8 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
- struct sge_ofld_rxq *r = &s->iscsirxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSI;
- r->fl.size = 72;
- }
-
- if (!is_t4(adap->params.chip)) {
- for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
- struct sge_ofld_rxq *r = &s->iscsitrxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSIT;
- r->fl.size = 72;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
- struct sge_ofld_rxq *r = &s->rdmarxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 511, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- r->fl.size = 72;
- }
-
- ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
- if (ciq_size > SGE_MAX_IQ_SIZE) {
- CH_WARN(adap, "CIQ size too small for available IQs\n");
- ciq_size = SGE_MAX_IQ_SIZE;
- }
-
- for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
- struct sge_ofld_rxq *r = &s->rdmaciq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- }
-
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
- init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
+ init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}
/*
@@ -4695,7 +4109,15 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int get_msix_info(struct adapter *adap)
{
struct uld_msix_info *msix_info;
- int max_ingq = (MAX_OFLD_QSETS * adap->num_uld);
+ unsigned int max_ingq = 0;
+
+ if (is_offload(adap))
+ max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
+ if (is_pci_uld(adap))
+ max_ingq += MAX_OFLD_QSETS * adap->num_uld;
+
+ if (!max_ingq)
+ goto out;
msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
if (!msix_info)
@@ -4709,12 +4131,13 @@ static int get_msix_info(struct adapter *adap)
}
spin_lock_init(&adap->msix_bmap_ulds.lock);
adap->msix_info_ulds = msix_info;
+out:
return 0;
}
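
The vector accounting above now budgets MAX_OFLD_QSETS ingress queues per ULD, split between offload ULDs (num_ofld_uld) and PCI ULDs (num_uld). A back-of-envelope sketch, assuming MAX_OFLD_QSETS is 16 and the capability increments this patch makes in adap_init0() (offload +1, RDMA +2, iSCSI +2, plus one PCI ULD for crypto); the helper below is hypothetical:

static unsigned int uld_ingq_budget(unsigned int num_ofld_uld,
				    unsigned int num_uld)
{
	const unsigned int max_ofld_qsets = 16;	/* assumed MAX_OFLD_QSETS */

	return max_ofld_qsets * (num_ofld_uld + num_uld);
}

/* uld_ingq_budget(5, 1) == 96 ULD MSI-X descriptors set aside. */
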
static void free_msix_info(struct adapter *adap)
{
- if (!adap->num_uld)
+ if (!(adap->num_uld && adap->num_ofld_uld))
return;
kfree(adap->msix_info_ulds);
@@ -4733,32 +4156,32 @@ static int enable_msix(struct adapter *adap)
struct msix_entry *entries;
int max_ingq = MAX_INGQ;
- max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
+ if (is_pci_uld(adap))
+ max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
+ if (is_offload(adap))
+ max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
/* map for msix */
- if (is_pci_uld(adap) && get_msix_info(adap))
+ if (get_msix_info(adap)) {
+ adap->params.offload = 0;
adap->params.crypto = 0;
+ }
for (i = 0; i < max_ingq + 1; ++i)
entries[i].entry = i;
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
- s->niscsitq;
- /* need nchan for each possible ULD */
- if (is_t4(adap->params.chip))
- ofld_need = 3 * nchan;
- else
- ofld_need = 4 * nchan;
+ want += adap->num_ofld_uld * s->ofldqsets;
+ ofld_need = adap->num_ofld_uld * nchan;
}
if (is_pci_uld(adap)) {
- want += netif_get_num_default_rss_queues() * nchan;
- uld_need = nchan;
+ want += adap->num_uld * s->ofldqsets;
+ uld_need = adap->num_uld * nchan;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
@@ -4786,43 +4209,25 @@ static int enable_msix(struct adapter *adap)
if (i < s->ethqsets)
reduce_ethqs(adap, i);
}
- if (is_pci_uld(adap)) {
+ if (is_uld(adap)) {
if (allocated < want)
s->nqs_per_uld = nchan;
else
- s->nqs_per_uld = netif_get_num_default_rss_queues() *
- nchan;
+ s->nqs_per_uld = s->ofldqsets;
}
- if (is_offload(adap)) {
- if (allocated < want) {
- s->rdmaqs = nchan;
- s->rdmaciqs = nchan;
-
- if (!is_t4(adap->params.chip))
- s->niscsitq = nchan;
- }
-
- /* leftovers go to OFLD */
- i = allocated - EXTRA_VECS - s->max_ethqsets -
- s->rdmaqs - s->rdmaciqs - s->niscsitq;
- if (is_pci_uld(adap))
- i -= s->nqs_per_uld * adap->num_uld;
- s->iscsiqsets = (i / nchan) * nchan; /* round down */
-
- }
-
- for (i = 0; i < (allocated - (s->nqs_per_uld * adap->num_uld)); ++i)
+ for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
adap->msix_info[i].vec = entries[i].vector;
- if (is_pci_uld(adap)) {
- for (j = 0 ; i < allocated; ++i, j++)
+ if (is_uld(adap)) {
+ for (j = 0 ; i < allocated; ++i, j++) {
adap->msix_info_ulds[j].vec = entries[i].vector;
+ adap->msix_info_ulds[j].idx = i;
+ }
adap->msix_bmap_ulds.mapsize = j;
}
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
- "nic %d iscsi %d rdma cpl %d rdma ciq %d uld %d\n",
- allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
- s->rdmaciqs, s->nqs_per_uld);
+ "nic %d per uld %d\n",
+ allocated, s->max_ethqsets, s->nqs_per_uld);
kfree(entries);
return 0;
@@ -5005,8 +4410,12 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
@@ -5034,6 +4443,7 @@ static void free_some_resources(struct adapter *adapter)
t4_free_mem(adapter->l2t);
t4_cleanup_sched(adapter);
t4_free_mem(adapter->tids.tid_tab);
+ cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
@@ -5378,7 +4788,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_TC;
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5462,10 +4873,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
i);
}
- if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+ if (tid_init(&adapter->tids) < 0) {
dev_warn(&pdev->dev, "could not allocate TID table, "
"continuing\n");
adapter->params.offload = 0;
+ } else {
+ adapter->tc_u32 = cxgb4_init_tc_u32(adapter,
+ CXGB4_MAX_LINK_HANDLE);
+ if (!adapter->tc_u32)
+ dev_warn(&pdev->dev,
+ "could not offload tc u32, continuing\n");
}
if (is_offload(adapter)) {
@@ -5535,10 +4952,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
- if (is_offload(adapter))
- attach_ulds(adapter);
+ if (is_uld(adapter)) {
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adapter->list_node, &adapter_list);
+ mutex_unlock(&uld_mutex);
+ }
print_adapter_info(adapter);
+ setup_fw_sge_queues(adapter);
return 0;
sriov:
@@ -5593,8 +5014,8 @@ sriov:
free_some_resources(adapter);
if (adapter->flags & USING_MSIX)
free_msix_info(adapter);
- if (adapter->num_uld)
- uld_mem_free(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
out_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
@@ -5631,7 +5052,7 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_offload(adapter))
+ if (is_uld(adapter))
detach_ulds(adapter);
disable_interrupts(adapter);
@@ -5645,21 +5066,15 @@ static void remove_one(struct pci_dev *pdev)
/* If we allocated filters, free up state associated with any
* valid filters ...
*/
- if (adapter->tids.ftid_tab) {
- struct filter_entry *f = &adapter->tids.ftid_tab[0];
- for (i = 0; i < (adapter->tids.nftids +
- adapter->tids.nsftids); i++, f++)
- if (f->valid)
- clear_filter(adapter, f);
- }
+ clear_all_filters(adapter);
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
if (adapter->flags & USING_MSIX)
free_msix_info(adapter);
- if (adapter->num_uld)
- uld_mem_free(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
@@ -5690,12 +5105,58 @@ static void remove_one(struct pci_dev *pdev)
#endif
}
+/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
+ * delivery. This is essentially a stripped-down version of the PCI remove()
+ * function where we do the minimal amount of work necessary to shutdown any
+ * further activity.
+ */
+static void shutdown_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ /* As with remove_one() above (see extended comment), we only want to
+ * do cleanup on PCI devices which went all the way through init_one()
+ * ...
+ */
+ if (!adapter) {
+ pci_release_regions(pdev);
+ return;
+ }
+
+ if (adapter->pf == 4) {
+ int i;
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
+ disable_interrupts(adapter);
+ disable_msi(adapter);
+
+ t4_sge_stop(adapter);
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+ }
+#ifdef CONFIG_PCI_IOV
+ else {
+ if (adapter->port[0])
+ unregister_netdev(adapter->port[0]);
+ iounmap(adapter->regs);
+ kfree(adapter->vfinfo);
+ kfree(adapter);
+ pci_disable_sriov(pdev);
+ pci_release_regions(pdev);
+ }
+#endif
+}
+
static struct pci_driver cxgb4_driver = {
.name = KBUILD_MODNAME,
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
- .shutdown = remove_one,
+ .shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
.sriov_configure = cxgb4_iov_configure,
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
new file mode 100644
index 000000000000..49d2debb334e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -0,0 +1,483 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "cxgb4.h"
+#include "cxgb4_tc_u32_parse.h"
+#include "cxgb4_tc_u32.h"
+
+/* Fill ch_filter_specification with parsed match value/mask pair. */
+static int fill_match_fields(struct adapter *adap,
+ struct ch_filter_specification *fs,
+ struct tc_cls_u32_offload *cls,
+ const struct cxgb4_match_field *entry,
+ bool next_header)
+{
+ unsigned int i, j;
+ u32 val, mask;
+ int off, err;
+ bool found;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ off = cls->knode.sel->keys[i].off;
+ val = cls->knode.sel->keys[i].val;
+ mask = cls->knode.sel->keys[i].mask;
+
+ if (next_header) {
+ /* For next headers, parse only keys with offmask */
+ if (!cls->knode.sel->keys[i].offmask)
+ continue;
+ } else {
+ /* For the remaining, parse only keys without offmask */
+ if (cls->knode.sel->keys[i].offmask)
+ continue;
+ }
+
+ found = false;
+
+ for (j = 0; entry[j].val; j++) {
+ if (off == entry[j].off) {
+ found = true;
+ err = entry[j].val(fs, val, mask);
+ if (err)
+ return err;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Fill ch_filter_specification with parsed action. */
+static int fill_action_fields(struct adapter *adap,
+ struct ch_filter_specification *fs,
+ struct tc_cls_u32_offload *cls)
+{
+ unsigned int num_actions = 0;
+ const struct tc_action *a;
+ struct tcf_exts *exts;
+ LIST_HEAD(actions);
+
+ exts = cls->knode.exts;
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ /* Don't allow more than one action per rule. */
+ if (num_actions)
+ return -EINVAL;
+
+ /* Drop in hardware. */
+ if (is_tcf_gact_shot(a)) {
+ fs->action = FILTER_DROP;
+ num_actions++;
+ continue;
+ }
+
+ /* Re-direct to specified port in hardware. */
+ if (is_tcf_mirred_redirect(a)) {
+ struct net_device *n_dev;
+ unsigned int i, index;
+ bool found = false;
+
+ index = tcf_mirred_ifindex(a);
+ for_each_port(adap, i) {
+ n_dev = adap->port[i];
+ if (index == n_dev->ifindex) {
+ fs->action = FILTER_SWITCH;
+ fs->eport = i;
+ found = true;
+ break;
+ }
+ }
+
+ /* Interface doesn't belong to any port of
+ * the underlying hardware.
+ */
+ if (!found)
+ return -EINVAL;
+
+ num_actions++;
+ continue;
+ }
+
+ /* Un-supported action. */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ const struct cxgb4_match_field *start, *link_start = NULL;
+ struct adapter *adapter = netdev2adap(dev);
+ struct ch_filter_specification fs;
+ struct cxgb4_tc_u32_table *t;
+ struct cxgb4_link *link;
+ unsigned int filter_id;
+ u32 uhtid, link_uhtid;
+ bool is_ipv6 = false;
+ int ret;
+
+ if (!can_tc_u32_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
+ return -EOPNOTSUPP;
+
+ /* Fetch the location to insert the filter. */
+ filter_id = cls->knode.handle & 0xFFFFF;
+
+ if (filter_id > adapter->tids.nftids) {
+ dev_err(adapter->pdev_dev,
+ "Location %d out of range for insertion. Max: %d\n",
+ filter_id, adapter->tids.nftids);
+ return -ERANGE;
+ }
+
+ t = adapter->tc_u32;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+ link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
+
+ /* Ensure that uhtid is either root u32 (i.e. 0x800)
+ * or a valid linked bucket.
+ */
+ if (uhtid != 0x800 && uhtid >= t->size)
+ return -EINVAL;
+
+ /* Ensure link handle uhtid is sane, if specified. */
+ if (link_uhtid >= t->size)
+ return -EINVAL;
+
+ memset(&fs, 0, sizeof(fs));
+
+ if (protocol == htons(ETH_P_IPV6)) {
+ start = cxgb4_ipv6_fields;
+ is_ipv6 = true;
+ } else {
+ start = cxgb4_ipv4_fields;
+ is_ipv6 = false;
+ }
+
+ if (uhtid != 0x800) {
+ /* Link must exist from root node before insertion. */
+ if (!t->table[uhtid - 1].link_handle)
+ return -EINVAL;
+
+ /* Link must have a valid supported next header. */
+ link_start = t->table[uhtid - 1].match_field;
+ if (!link_start)
+ return -EINVAL;
+ }
+
+ /* Parse links and record them for subsequent jumps to valid
+ * next headers.
+ */
+ if (link_uhtid) {
+ const struct cxgb4_next_header *next;
+ bool found = false;
+ unsigned int i, j;
+ u32 val, mask;
+ int off;
+
+ if (t->table[link_uhtid - 1].link_handle) {
+ dev_err(adapter->pdev_dev,
+ "Link handle exists for: 0x%x\n",
+ link_uhtid);
+ return -EINVAL;
+ }
+
+ next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;
+
+ /* Try to find matches that allow jumps to next header. */
+ for (i = 0; next[i].jump; i++) {
+ if (next[i].offoff != cls->knode.sel->offoff ||
+ next[i].shift != cls->knode.sel->offshift ||
+ next[i].mask != cls->knode.sel->offmask ||
+ next[i].offset != cls->knode.sel->off)
+ continue;
+
+ /* Found a possible candidate. Find a key that
+ * matches the corresponding offset, value, and
+ * mask to jump to next header.
+ */
+ for (j = 0; j < cls->knode.sel->nkeys; j++) {
+ off = cls->knode.sel->keys[j].off;
+ val = cls->knode.sel->keys[j].val;
+ mask = cls->knode.sel->keys[j].mask;
+
+ if (next[i].match_off == off &&
+ next[i].match_val == val &&
+ next[i].match_mask == mask) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ continue; /* Try next candidate. */
+
+ /* Candidate to jump to next header found.
+ * Translate all keys to internal specification
+ * and store them in jump table. This spec is copied
+ * later to set the actual filters.
+ */
+ ret = fill_match_fields(adapter, &fs, cls,
+ start, false);
+ if (ret)
+ goto out;
+
+ link = &t->table[link_uhtid - 1];
+ link->match_field = next[i].jump;
+ link->link_handle = cls->knode.handle;
+ memcpy(&link->fs, &fs, sizeof(fs));
+ break;
+ }
+
+ /* No candidate found to jump to next header. */
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* Fill ch_filter_specification match fields to be shipped to hardware.
+ * Copy the linked spec (if any) first. And then update the spec as
+ * needed.
+ */
+ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
+ /* Copy linked ch_filter_specification */
+ memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
+ ret = fill_match_fields(adapter, &fs, cls,
+ link_start, true);
+ if (ret)
+ goto out;
+ }
+
+ ret = fill_match_fields(adapter, &fs, cls, start, false);
+ if (ret)
+ goto out;
+
+ /* Fill ch_filter_specification action fields to be shipped to
+ * hardware.
+ */
+ ret = fill_action_fields(adapter, &fs, cls);
+ if (ret)
+ goto out;
+
+ /* The filter spec has been completely built from the info
+ * provided from u32. We now set some default fields in the
+ * spec for sanity.
+ */
+
+ /* Match only packets coming from the ingress port where this
+ * filter will be created.
+ */
+ fs.val.iport = netdev2pinfo(dev)->port_id;
+ fs.mask.iport = ~0;
+
+ /* Enable filter hit counts. */
+ fs.hitcnts = 1;
+
+ /* Set type of filter - IPv6 or IPv4 */
+ fs.type = is_ipv6 ? 1 : 0;
+
+ /* Set the filter */
+ ret = cxgb4_set_filter(dev, filter_id, &fs);
+ if (ret)
+ goto out;
+
+ /* If this is a linked bucket, then set the corresponding
+ * entry in the bitmap to mark it as belonging to this linked
+ * bucket.
+ */
+ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
+ set_bit(filter_id, t->table[uhtid - 1].tid_map);
+
+out:
+ return ret;
+}
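
To make the handle arithmetic above concrete: a u32 knode handle packs a 12-bit hash-table id, an 8-bit bucket and a 12-bit node id, and the driver reuses the low 20 bits as the ftid slot. A sketch with illustrative values (not part of the patch):

#include <linux/pkt_cls.h>

static void example_handle_decode(void)
{
	u32 handle    = (0x800u << 20) | 0x3;	 /* "ht 800:", node 3 */
	u32 filter_id = handle & 0xFFFFF;	 /* ftid slot 3 */
	u32 uhtid     = TC_U32_USERHTID(handle); /* 0x800, the root table */
}
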
+
+int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int filter_id, max_tids, i, j;
+ struct cxgb4_link *link = NULL;
+ struct cxgb4_tc_u32_table *t;
+ u32 handle, uhtid;
+ int ret;
+
+ if (!can_tc_u32_offload(dev))
+ return -EOPNOTSUPP;
+
+ /* Fetch the location to delete the filter. */
+ filter_id = cls->knode.handle & 0xFFFFF;
+
+ if (filter_id > adapter->tids.nftids) {
+ dev_err(adapter->pdev_dev,
+ "Location %d out of range for deletion. Max: %d\n",
+ filter_id, adapter->tids.nftids);
+ return -ERANGE;
+ }
+
+ t = adapter->tc_u32;
+ handle = cls->knode.handle;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+
+ /* Ensure that uhtid is either root u32 (i.e. 0x800)
+ * or a valid linked bucket.
+ */
+ if (uhtid != 0x800 && uhtid >= t->size)
+ return -EINVAL;
+
+ /* Delete the specified filter */
+ if (uhtid != 0x800) {
+ link = &t->table[uhtid - 1];
+ if (!link->link_handle)
+ return -EINVAL;
+
+ if (!test_bit(filter_id, link->tid_map))
+ return -EINVAL;
+ }
+
+ ret = cxgb4_del_filter(dev, filter_id);
+ if (ret)
+ goto out;
+
+ if (link)
+ clear_bit(filter_id, link->tid_map);
+
+ /* If a link is being deleted, then delete all filters
+ * associated with the link.
+ */
+ max_tids = adapter->tids.nftids;
+ for (i = 0; i < t->size; i++) {
+ link = &t->table[i];
+
+ if (link->link_handle == handle) {
+ for (j = 0; j < max_tids; j++) {
+ if (!test_bit(j, link->tid_map))
+ continue;
+
+ ret = __cxgb4_del_filter(dev, j, NULL);
+ if (ret)
+ goto out;
+
+ clear_bit(j, link->tid_map);
+ }
+
+ /* Clear the link state */
+ link->match_field = NULL;
+ link->link_handle = 0;
+ memset(&link->fs, 0, sizeof(link->fs));
+ break;
+ }
+ }
+
+out:
+ return ret;
+}
+
+void cxgb4_cleanup_tc_u32(struct adapter *adap)
+{
+ struct cxgb4_tc_u32_table *t;
+ unsigned int i;
+
+ if (!adap->tc_u32)
+ return;
+
+ /* Free up all allocated memory. */
+ t = adap->tc_u32;
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+
+ t4_free_mem(link->tid_map);
+ }
+ t4_free_mem(adap->tc_u32);
+}
+
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
+ unsigned int size)
+{
+ struct cxgb4_tc_u32_table *t;
+ unsigned int i;
+
+ if (!size)
+ return NULL;
+
+ t = t4_alloc_mem(sizeof(*t) +
+ (size * sizeof(struct cxgb4_link)));
+ if (!t)
+ return NULL;
+
+ t->size = size;
+
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+ unsigned int bmap_size;
+ unsigned int max_tids;
+
+ max_tids = adap->tids.nftids;
+ bmap_size = BITS_TO_LONGS(max_tids);
+ link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
+ if (!link->tid_map)
+ goto out_no_mem;
+ bitmap_zero(link->tid_map, max_tids);
+ }
+
+ return t;
+
+out_no_mem:
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+
+ if (link->tid_map)
+ t4_free_mem(link->tid_map);
+ }
+
+ if (t)
+ t4_free_mem(t);
+
+ return NULL;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
new file mode 100644
index 000000000000..6bdc885eff22
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
@@ -0,0 +1,57 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_U32_H
+#define __CXGB4_TC_U32_H
+
+#include <net/pkt_cls.h>
+
+#define CXGB4_MAX_LINK_HANDLE 32
+
+static inline bool can_tc_u32_offload(struct net_device *dev)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ return (dev->features & NETIF_F_HW_TC) && adap->tc_u32 ? true : false;
+}
+
+int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls);
+int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls);
+
+void cxgb4_cleanup_tc_u32(struct adapter *adapter);
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
+ unsigned int size);
+#endif /* __CXGB4_TC_U32_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
new file mode 100644
index 000000000000..a4b99edcc339
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -0,0 +1,294 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_U32_PARSE_H
+#define __CXGB4_TC_U32_PARSE_H
+
+struct cxgb4_match_field {
+ int off; /* Offset from the beginning of the header to match */
+ /* Fill the value/mask pair in the spec if matched */
+ int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+};
+
+/* IPv4 match fields */
+static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
+ f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ u32 mask_val;
+ u8 frag_val;
+
+ frag_val = (ntohl(val) >> 13) & 0x00000007;
+ mask_val = ntohl(mask) & 0x0000FFFF;
+
+ if (frag_val == 0x1 && mask_val != 0x3FFF) { /* MF set */
+ f->val.frag = 1;
+ f->mask.frag = 1;
+ } else if (frag_val == 0x2 && mask_val != 0x3FFF) { /* DF set */
+ f->val.frag = 0;
+ f->mask.frag = 1;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
+ f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[0], &val, sizeof(u32));
+ memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[0], &val, sizeof(u32));
+ memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
+ { .off = 0, .val = cxgb4_fill_ipv4_tos },
+ { .off = 4, .val = cxgb4_fill_ipv4_frag },
+ { .off = 8, .val = cxgb4_fill_ipv4_proto },
+ { .off = 12, .val = cxgb4_fill_ipv4_src_ip },
+ { .off = 16, .val = cxgb4_fill_ipv4_dst_ip },
+ { .val = NULL }
+};
+
+/* IPv6 match fields */
+static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
+ f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
+ f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[0], &val, sizeof(u32));
+ memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[4], &val, sizeof(u32));
+ memcpy(&f->mask.fip[4], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[8], &val, sizeof(u32));
+ memcpy(&f->mask.fip[8], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[12], &val, sizeof(u32));
+ memcpy(&f->mask.fip[12], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[0], &val, sizeof(u32));
+ memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[4], &val, sizeof(u32));
+ memcpy(&f->mask.lip[4], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[8], &val, sizeof(u32));
+ memcpy(&f->mask.lip[8], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[12], &val, sizeof(u32));
+ memcpy(&f->mask.lip[12], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
+ { .off = 0, .val = cxgb4_fill_ipv6_tos },
+ { .off = 4, .val = cxgb4_fill_ipv6_proto },
+ { .off = 8, .val = cxgb4_fill_ipv6_src_ip0 },
+ { .off = 12, .val = cxgb4_fill_ipv6_src_ip1 },
+ { .off = 16, .val = cxgb4_fill_ipv6_src_ip2 },
+ { .off = 20, .val = cxgb4_fill_ipv6_src_ip3 },
+ { .off = 24, .val = cxgb4_fill_ipv6_dst_ip0 },
+ { .off = 28, .val = cxgb4_fill_ipv6_dst_ip1 },
+ { .off = 32, .val = cxgb4_fill_ipv6_dst_ip2 },
+ { .off = 36, .val = cxgb4_fill_ipv6_dst_ip3 },
+ { .val = NULL }
+};
+
+/* TCP/UDP match */
+static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.fport = ntohl(val) >> 16;
+ f->mask.fport = ntohl(mask) >> 16;
+ f->val.lport = ntohl(val) & 0x0000FFFF;
+ f->mask.lport = ntohl(mask) & 0x0000FFFF;
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_tcp_fields[] = {
+ { .off = 0, .val = cxgb4_fill_l4_ports },
+ { .val = NULL }
+};
+
+static const struct cxgb4_match_field cxgb4_udp_fields[] = {
+ { .off = 0, .val = cxgb4_fill_l4_ports },
+ { .val = NULL }
+};
+
+struct cxgb4_next_header {
+ unsigned int offset; /* Offset to next header */
+ /* offset, shift, and mask added to offset above
+ * to get to next header. Useful when using a header
+ * field's value to jump to next header such as IHL field
+ * in IPv4 header.
+ */
+ unsigned int offoff;
+ u32 shift;
+ u32 mask;
+ /* match criteria to make this jump */
+ unsigned int match_off;
+ u32 match_val;
+ u32 match_mask;
+ /* location of jump to make */
+ const struct cxgb4_match_field *jump;
+};
+
+/* Accept a rule with a jump to transport layer header based on IHL field in
+ * IPv4 header.
+ */
+static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
+ { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+ .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
+ .jump = cxgb4_tcp_fields },
+ { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+ .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
+ .jump = cxgb4_udp_fields },
+ { .jump = NULL }
+};
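
The jump entries above are compared verbatim against the classifier's selector fields, so they only accept the canonical "offset via IHL" encoding. A rough userspace sketch of the variable-offset computation those selector fields describe (semantics per cls_u32; offmask is taken in host order here for simplicity):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* next = off + ((half-word at offoff & offmask) >> offshift) */
static unsigned int u32_var_offset(const uint8_t *pkt, unsigned int off,
				   unsigned int offoff, uint16_t offmask,
				   unsigned int offshift)
{
	uint16_t hw;

	memcpy(&hw, pkt + offoff, sizeof(hw));
	return off + ((ntohs(hw) & offmask) >> offshift);
}

/* A plain 20-byte IPv4 header starts 0x45 0x00, so with offmask 0x0f00
 * and shift 6: (0x4500 & 0x0f00) >> 6 == 20, i.e. IHL words * 4.
 */
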
+
+/* Accept a rule with a jump directly past the 40 bytes of IPv6 fixed header
+ * to get to transport layer header.
+ */
+static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
+ { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+ .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
+ .jump = cxgb4_tcp_fields },
+ { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+ .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
+ .jump = cxgb4_udp_fields },
+ { .jump = NULL }
+};
+
+struct cxgb4_link {
+ const struct cxgb4_match_field *match_field; /* Next header */
+ struct ch_filter_specification fs; /* Match spec associated with link */
+ u32 link_handle; /* Knode handle associated with the link */
+ unsigned long *tid_map; /* Bitmap for filter tids */
+};
+
+struct cxgb4_tc_u32_table {
+ unsigned int size; /* number of entries in table */
+ struct cxgb4_link table[0]; /* Jump table */
+};
+#endif /* __CXGB4_TC_U32_PARSE_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 5d402bace6c1..b4b2d20aab3c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -82,6 +82,24 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
spin_unlock_irqrestore(&bmap->lock, flags);
}
+/* Flush the aggregated LRO sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+ struct adapter *adap = q->adap;
+
+ if (adap->uld[q->uld].lro_flush)
+ adap->uld[q->uld].lro_flush(&q->lro_mgr);
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD, we just maintain statistics.
+ */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
@@ -124,8 +142,8 @@ static int alloc_uld_rxqs(struct adapter *adap,
struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
unsigned short *ids = rxq_info->rspq_id + offset;
unsigned int per_chan = nq / adap->params.nports;
- unsigned int msi_idx, bmap_idx;
- int i, err;
+ unsigned int bmap_idx = 0;
+ int i, err, msi_idx;
if (adap->flags & USING_MSIX)
msi_idx = 1;
@@ -135,14 +153,14 @@ static int alloc_uld_rxqs(struct adapter *adap,
for (i = 0; i < nq; i++, q++) {
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
- adap->msi_idx++;
+ msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
- adap->msi_idx,
+ msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
- NULL,
+ lro ? uldrx_flush_handler : NULL,
0);
if (err)
goto freeout;
@@ -159,7 +177,6 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
/* We need to free rxq also in case of ciq allocation failure */
@@ -169,26 +186,47 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
}
return err;
}
-int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
+static int
+setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int i, ret = 0;
if (adap->flags & USING_MSIX) {
- rxq_info->msix_tbl = kzalloc(rxq_info->nrxq + rxq_info->nciq,
+ rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
+ sizeof(unsigned short),
GFP_KERNEL);
if (!rxq_info->msix_tbl)
return -ENOMEM;
}
- return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
!alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
rxq_info->nrxq, lro));
+
+ /* Tell uP to route control queue completions to rdma rspq */
+ if (adap->flags & FULL_INIT_DONE &&
+ !ret && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ unsigned int cmplqid;
+ u32 param, cmdop;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+ return ret;
}
static void t4_free_uld_rxqs(struct adapter *adap, int n,
@@ -198,14 +236,28 @@ static void t4_free_uld_rxqs(struct adapter *adap, int n,
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
}
-void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
+static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ u32 param, cmdop, cmplqid = 0;
+ int i;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+
if (rxq_info->nciq)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
@@ -214,27 +266,39 @@ void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
kfree(rxq_info->msix_tbl);
}
-int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
- const struct cxgb4_pci_uld_info *uld_info)
+static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
+ const struct cxgb4_uld_info *uld_info)
{
struct sge *s = &adap->sge;
struct sge_uld_rxq_info *rxq_info;
- int i, nrxq;
+ int i, nrxq, ciq_size;
rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
if (!rxq_info)
return -ENOMEM;
- if (uld_info->nrxq > s->nqs_per_uld)
- rxq_info->nrxq = s->nqs_per_uld;
- else
- rxq_info->nrxq = uld_info->nrxq;
- if (!uld_info->nciq)
+ if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ i = s->nqs_per_uld;
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ } else {
+ i = min_t(int, uld_info->nrxq,
+ num_online_cpus());
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ }
+ if (!uld_info->ciq) {
rxq_info->nciq = 0;
- else if (uld_info->nciq && uld_info->nciq > s->nqs_per_uld)
- rxq_info->nciq = s->nqs_per_uld;
- else
- rxq_info->nciq = uld_info->nciq;
+ } else {
+ if (adap->flags & USING_MSIX)
+ rxq_info->nciq = min_t(int, s->nqs_per_uld,
+ num_online_cpus());
+ else
+ rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
+ num_online_cpus());
+ rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
+ adap->params.nports);
+ rxq_info->nciq = max_t(int, rxq_info->nciq,
+ adap->params.nports);
+ }
nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
@@ -245,7 +309,7 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
}
rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
- if (!rxq_info->uldrxq) {
+ if (!rxq_info->rspq_id) {
kfree(rxq_info->uldrxq);
kfree(rxq_info);
return -ENOMEM;
@@ -259,12 +323,17 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
r->fl.size = 72;
}
+ ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+ if (ciq_size > SGE_MAX_IQ_SIZE) {
+ dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
+ ciq_size = SGE_MAX_IQ_SIZE;
+ }
+
for (i = rxq_info->nrxq; i < nrxq; i++) {
struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
- init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64);
+ init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
r->rspq.uld = uld_type;
- r->fl.size = 72;
}
memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
@@ -273,7 +342,7 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
return 0;
}
-void free_queues_uld(struct adapter *adap, unsigned int uld_type)
+static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
@@ -282,10 +351,12 @@ void free_queues_uld(struct adapter *adap, unsigned int uld_type)
kfree(rxq_info);
}
-int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
+static int
+request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx, bmap_idx, err = 0;
+ int err = 0;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
@@ -298,7 +369,7 @@ int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
}
return 0;
unwind:
- while (--idx >= 0) {
+ while (idx-- > 0) {
bmap_idx = rxq_info->msix_tbl[idx];
free_msix_idx_in_bmap(adap, bmap_idx);
free_irq(adap->msix_info_ulds[bmap_idx].vec,
@@ -307,13 +378,14 @@ unwind:
return err;
}
-void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
+static void
+free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+ bmap_idx = rxq_info->msix_tbl[idx];
free_msix_idx_in_bmap(adap, bmap_idx);
free_irq(adap->msix_info_ulds[bmap_idx].vec,
@@ -321,14 +393,14 @@ void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
}
}
-void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
+static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int n = sizeof(adap->msix_info_ulds[0].desc);
- int idx;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+ bmap_idx = rxq_info->msix_tbl[idx];
snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
adap->port[0]->name, rxq_info->name, idx);
@@ -361,7 +433,7 @@ static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
}
}
-void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
+static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
@@ -370,7 +442,7 @@ void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
-void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
+static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int idx;
@@ -390,15 +462,15 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
lli->nciq = rxq_info->nciq;
}
-int uld_mem_alloc(struct adapter *adap)
+int t4_uld_mem_alloc(struct adapter *adap)
{
struct sge *s = &adap->sge;
- adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL);
+ adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
if (!adap->uld)
return -ENOMEM;
- s->uld_rxq_info = kzalloc(adap->num_uld *
+ s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
sizeof(struct sge_uld_rxq_info *),
GFP_KERNEL);
if (!s->uld_rxq_info)
@@ -410,7 +482,7 @@ err_uld:
return -ENOMEM;
}
-void uld_mem_free(struct adapter *adap)
+void t4_uld_mem_free(struct adapter *adap)
{
struct sge *s = &adap->sge;
@@ -418,6 +490,26 @@ void uld_mem_free(struct adapter *adap)
kfree(adap->uld);
}
+void t4_uld_clean_up(struct adapter *adap)
+{
+ struct sge_uld_rxq_info *rxq_info;
+ unsigned int i;
+
+ if (!adap->uld)
+ return;
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ if (!adap->uld[i].handle)
+ continue;
+ rxq_info = adap->sge.uld_rxq_info[i];
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, i);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, i);
+ free_sge_queues_uld(adap, i);
+ free_queues_uld(adap, i);
+ }
+}
+
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
int i;
@@ -429,10 +521,15 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
- lld->ntxq = adap->sge.iscsiqsets;
+ lld->ntxq = adap->sge.ofldqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+ lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+ lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+ lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+ lld->iscsi_ppm = &adap->iscsi_ppm;
lld->adapter_type = adap->params.chip;
lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
lld->udb_density = 1 << adap->params.sge.eq_qpp;
@@ -472,23 +569,37 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
}
adap->uld[uld].handle = handle;
+ t4_register_netevent_notifier();
if (adap->flags & FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
-int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
- struct cxgb4_pci_uld_info *p)
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
{
int ret = 0;
+ unsigned int adap_idx = 0;
struct adapter *adap;
- if (type >= CXGB4_PCI_ULD_MAX)
+ if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
- if (!is_pci_uld(adap))
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue;
ret = cfg_queues_uld(adap, type, p);
if (ret)
@@ -510,11 +621,14 @@ int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
}
adap->uld[type] = *p;
uld_attach(adap, type);
+ adap_idx++;
}
mutex_unlock(&uld_mutex);
return 0;
free_irq:
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
@@ -522,21 +636,49 @@ free_rxq:
free_queues:
free_queues_uld(adap, type);
out:
+
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ if (!adap_idx)
+ break;
+ adap->uld[type].handle = NULL;
+ adap->uld[type].add = NULL;
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+ free_sge_queues_uld(adap, type);
+ free_queues_uld(adap, type);
+ adap_idx--;
+ }
mutex_unlock(&uld_mutex);
return ret;
}
-EXPORT_SYMBOL(cxgb4_register_pci_uld);
+EXPORT_SYMBOL(cxgb4_register_uld);
-int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
{
struct adapter *adap;
- if (type >= CXGB4_PCI_ULD_MAX)
+ if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
- if (!is_pci_uld(adap))
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
@@ -551,4 +693,4 @@ int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
return 0;
}
-EXPORT_SYMBOL(cxgb4_unregister_pci_uld);
+EXPORT_SYMBOL(cxgb4_unregister_uld);
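With the PCI-only path folded in, every upper-layer driver now registers through this single entry point. A minimal caller sketch follows; the callback bodies, the "demo" identifiers and the rxq_size value are illustrative assumptions, not taken from the driver:

struct demo_dev {
	const struct cxgb4_lld_info *lld;	/* queue/port info from cxgb4 */
};

static void *demo_uld_add(const struct cxgb4_lld_info *lld)
{
	struct demo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (dev)
		dev->lld = lld;
	return dev;				/* becomes uld[type].handle */
}

static int demo_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	return 0;				/* CPL message consumed */
}

static int demo_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;
}

static struct cxgb4_uld_info demo_uld_info = {
	.name		= "demo",
	.nrxq		= MAX_ULD_QSETS,
	.rxq_size	= 1024,
	.add		= demo_uld_add,
	.rx_handler	= demo_uld_rx_handler,
	.state_change	= demo_uld_state_change,
};

/* Module init/exit pair, with the CXGB4_ULD_* type being implemented:
 *	cxgb4_register_uld(type, &demo_uld_info);
 *	cxgb4_unregister_uld(type);
 */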
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index ab4037222f8d..47bd14f602db 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -42,6 +42,8 @@
#include <linux/atomic.h>
#include "cxgb4.h"
+#define MAX_ULD_QSETS 16
+
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
@@ -104,6 +106,7 @@ struct tid_info {
unsigned int atid_base;
struct filter_entry *ftid_tab;
+ unsigned long *ftid_bmap;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -124,6 +127,8 @@ struct tid_info {
atomic_t tids_in_use;
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
+ /* lock for setting/clearing filter bitmap */
+ spinlock_t ftid_lock;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -183,15 +188,38 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6);
+/* Filter operation context to allow callers of cxgb4_set_filter() and
+ * cxgb4_del_filter() to wait for an asynchronous completion.
+ */
+struct filter_ctx {
+ struct completion completion; /* completion rendezvous */
+ void *closure; /* caller's opaque information */
+ int result; /* result of operation */
+ u32 tid; /* tid assigned to the filter */
+};
+
+struct ch_filter_specification;
+
+int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx);
+int cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs);
+int cxgb4_del_filter(struct net_device *dev, int filter_id);
+
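+/* Typical asynchronous usage (illustrative; the timeout is an example
+ * value, and the synchronous cxgb4_set_filter()/cxgb4_del_filter()
+ * variants are expected to perform this wait internally):
+ *
+ *	struct filter_ctx ctx;
+ *
+ *	init_completion(&ctx.completion);
+ *	err = __cxgb4_set_filter(dev, fidx, &fs, &ctx);
+ *	if (!err && !wait_for_completion_timeout(&ctx.completion, 10 * HZ))
+ *		err = -ETIMEDOUT;
+ *	if (!err)
+ *		err = ctx.result;
+ */
+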
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
}
enum cxgb4_uld {
+ CXGB4_ULD_INIT,
CXGB4_ULD_RDMA,
CXGB4_ULD_ISCSI,
CXGB4_ULD_ISCSIT,
+ CXGB4_ULD_CRYPTO,
CXGB4_ULD_MAX
};
@@ -284,31 +312,11 @@ struct cxgb4_lld_info {
struct cxgb4_uld_info {
const char *name;
- void *(*add)(const struct cxgb4_lld_info *p);
- int (*rx_handler)(void *handle, const __be64 *rsp,
- const struct pkt_gl *gl);
- int (*state_change)(void *handle, enum cxgb4_state new_state);
- int (*control)(void *handle, enum cxgb4_control control, ...);
- int (*lro_rx_handler)(void *handle, const __be64 *rsp,
- const struct pkt_gl *gl,
- struct t4_lro_mgr *lro_mgr,
- struct napi_struct *napi);
- void (*lro_flush)(struct t4_lro_mgr *);
-};
-
-enum cxgb4_pci_uld {
- CXGB4_PCI_ULD1,
- CXGB4_PCI_ULD_MAX
-};
-
-struct cxgb4_pci_uld_info {
- const char *name;
- bool lro;
void *handle;
unsigned int nrxq;
- unsigned int nciq;
unsigned int rxq_size;
- unsigned int ciq_size;
+ bool ciq;
+ bool lro;
void *(*add)(const struct cxgb4_lld_info *p);
int (*rx_handler)(void *handle, const __be64 *rsp,
const struct pkt_gl *gl);
@@ -323,9 +331,6 @@ struct cxgb4_pci_uld_info {
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
-int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
- struct cxgb4_pci_uld_info *p);
-int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 9a607dbc6ca8..1e74fd6085df 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2860,6 +2860,18 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
return 0;
}
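+/**
+ *	t4_sge_mod_ctrl_txq - bind a control TXQ to a completion queue
+ *	@adap: the adapter
+ *	@eqid: egress queue id of the control TXQ
+ *	@cmplqid: id of the completion queue to report completions to
+ *
+ *	Issues a DMAQ_EQ_CMPLIQID_CTRL firmware parameters command so that
+ *	the given control egress queue reports its completions to @cmplqid.
+ */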
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+ unsigned int cmplqid)
+{
+ u32 param, val;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
+ FW_PARAMS_PARAM_YZ_V(eqid));
+ val = cmplqid;
+ return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+}
+
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid)
{
@@ -3014,12 +3026,6 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- /* clean up RDMA and iSCSI Rx queues */
- t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
- t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
- t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
- t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
-
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 15be54324e3d..20dec85da63d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
FW_PORT_CAP_ANEG)
/**
@@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
lc = &pi->link_cfg;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ffe4bf4b96da..4b58b32105f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2267,6 +2267,12 @@ enum fw_port_cap {
FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
+#define FW_PORT_CAP_SPEED_S 0
+#define FW_PORT_CAP_SPEED_M 0x3f
+#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S)
+#define FW_PORT_CAP_SPEED_G(x) \
+ (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
+
enum fw_port_mdi {
FW_PORT_CAP_MDI_UNCHANGED,
FW_PORT_CAP_MDI_AUTO,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8067424ad4a8..b3903fe411aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -108,8 +108,8 @@ struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short lp_advertising; /* peer advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
@@ -271,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
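Concretely, the is_x_10g_port() mask logic above works as follows, assuming (as the 0x3f field mask implies) that the six FW_PORT_CAP_SPEED_* flags occupy the low six bits and FW_PORT_CAP_ANEG sits above them:

	unsigned int supported = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G |
				 FW_PORT_CAP_ANEG;
	unsigned int speeds, high_speeds;

	/* the 0x3f field mask keeps only the six speed bits, dropping ANEG */
	speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(supported));
	/* clearing the <=1Gb/s bits leaves just FW_PORT_CAP_SPEED_10G */
	high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
	/* high_speeds != 0 -> this counts as a "high speed" port */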
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 879f4c52b3d5..e98248f00fef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -314,8 +314,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
- FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
+ FW_PORT_CAP_ANEG)
/**
* init_link_config - initialize a link's SW state
@@ -1716,8 +1717,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
/*
* Scan all of our "ports" (Virtual Interfaces) looking for
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 15d02da08d8f..9cffe48be156 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4382,7 +4382,7 @@ err:
}
/* This routine returns a list of all the NIC PF_nums in the adapter */
-u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
{
struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
struct be_pcie_res_desc *pcie = NULL;
@@ -4534,7 +4534,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
}
/* Mark all fields invalid */
-void be_reset_nic_desc(struct be_nic_res_desc *nic)
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
memset(nic, 0, sizeof(*nic));
nic->unicast_mac_count = 0xFFFF;
@@ -4907,8 +4907,9 @@ err:
return status;
}
-int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
- int link_state, int version, u8 domain)
+static int
+__be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, int version, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_ll_link *req;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 34f63eff6e8a..dcb930a52613 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -44,7 +44,7 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
/* Per-module error detection/recovery workq shared across all functions.
* Each function schedules its own work request on this shared workq.
*/
-struct workqueue_struct *be_err_recovery_workq;
+static struct workqueue_struct *be_err_recovery_workq;
static const struct pci_device_id be_dev_ids[] = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -60,7 +60,7 @@ static const struct pci_device_id be_dev_ids[] = {
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Workqueue used by all functions for deferring cmd calls to the adapter */
-struct workqueue_struct *be_wq;
+static struct workqueue_struct *be_wq;
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
@@ -1895,7 +1895,8 @@ static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1907,6 +1908,9 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
status = be_set_vf_tvt(adapter, vf, vlan);
@@ -4365,7 +4369,7 @@ static void be_setup_init(struct be_adapter *adapter)
* for distribution between the VFs. This self-imposed limit will determine the
* no: of VFs for which RSS can be enabled.
*/
-void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
struct be_port_resources port_res = {0};
u8 rss_tables_on_port;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 36361f8bf894..262587240c86 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -60,6 +60,8 @@ struct ftgmac100 {
struct ftgmac100_descs *descs;
dma_addr_t descs_dma_addr;
+ struct page *rx_pages[RX_QUEUE_ENTRIES];
+
unsigned int rx_pointer;
unsigned int tx_clean_pointer;
unsigned int tx_pointer;
@@ -77,6 +79,9 @@ struct ftgmac100 {
int int_mask_all;
bool use_ncsi;
bool enabled;
+
+ u32 rxdes0_edorr_mask;
+ u32 txdes0_edotr_mask;
};
static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
@@ -257,10 +262,11 @@ static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
}
-static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rxdes_set_dma_own(const struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
/* clear status bits */
- rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+ rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
}
static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
@@ -298,9 +304,10 @@ static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
}
-static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
- rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+ rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
}
static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
@@ -341,18 +348,27 @@ static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
}
+static inline struct page **ftgmac100_rxdes_page_slot(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
+{
+ return &priv->rx_pages[rxdes - priv->descs->rxdes];
+}
+
/*
 * rxdes2 is not used by hardware, but it is too small to hold a page
 * pointer on 64-bit kernels, so the page for each descriptor is tracked
 * in the driver-private rx_pages[] array instead.
 */
-static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page)
+static void ftgmac100_rxdes_set_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes,
+ struct page *page)
{
- rxdes->rxdes2 = (unsigned int)page;
+ *ftgmac100_rxdes_page_slot(priv, rxdes) = page;
}
-static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes)
+static struct page *ftgmac100_rxdes_get_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
- return (struct page *)rxdes->rxdes2;
+ return *ftgmac100_rxdes_page_slot(priv, rxdes);
}
/******************************************************************************
@@ -382,7 +398,7 @@ ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
if (ftgmac100_rxdes_first_segment(rxdes))
return rxdes;
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
}
@@ -453,7 +469,7 @@ static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
if (ftgmac100_rxdes_last_segment(rxdes))
done = true;
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
} while (!done && ftgmac100_rxdes_packet_ready(rxdes));
@@ -501,7 +517,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
do {
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
- struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
unsigned int size;
dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
@@ -545,10 +561,11 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
/******************************************************************************
* internal functions (transmit descriptor)
*****************************************************************************/
-static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes)
+static void ftgmac100_txdes_reset(const struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes)
{
/* clear all except end of ring bit */
- txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask);
txdes->txdes1 = 0;
txdes->txdes2 = 0;
txdes->txdes3 = 0;
@@ -569,9 +586,10 @@ static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
}
-static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes)
+static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes)
{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}
static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
@@ -690,7 +708,7 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
dev_kfree_skb(skb);
- ftgmac100_txdes_reset(txdes);
+ ftgmac100_txdes_reset(priv, txdes);
ftgmac100_tx_clean_pointer_advance(priv);
@@ -779,9 +797,9 @@ static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
return -ENOMEM;
}
- ftgmac100_rxdes_set_page(rxdes, page);
+ ftgmac100_rxdes_set_page(priv, rxdes, page);
ftgmac100_rxdes_set_dma_addr(rxdes, map);
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
return 0;
}
@@ -791,7 +809,7 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
- struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
if (!page)
@@ -828,7 +846,8 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
return -ENOMEM;
/* initialize RX ring */
- ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+ ftgmac100_rxdes_set_end_of_ring(priv,
+ &priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
@@ -838,7 +857,8 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
}
/* initialize TX ring */
- ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+ ftgmac100_txdes_set_end_of_ring(priv,
+ &priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
return 0;
err:
@@ -1055,14 +1075,12 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
}
if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
- FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_PHYSTS_CHG)) {
+ FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR)) {
if (net_ratelimit())
- netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+ netdev_info(netdev, "[ISR] = 0x%x: %s%s%s\n", status,
status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
- status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
- status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+ status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "");
if (status & FTGMAC100_INT_NO_RXBUF) {
/* RX buffer unavailable */
@@ -1092,6 +1110,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int status;
int err;
err = ftgmac100_alloc_buffers(priv);
@@ -1117,6 +1136,11 @@ static int ftgmac100_open(struct net_device *netdev)
ftgmac100_init_hw(priv);
ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+
+ /* Clear stale interrupts */
+ status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+
if (netdev->phydev)
phy_start(netdev->phydev);
else if (priv->use_ncsi)
@@ -1166,6 +1190,8 @@ static int ftgmac100_stop(struct net_device *netdev)
napi_disable(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
+ else if (priv->use_ncsi)
+ ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
free_irq(priv->irq, netdev);
@@ -1226,12 +1252,21 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
int i, err = 0;
+ u32 reg;
/* initialize mdio bus */
priv->mii_bus = mdiobus_alloc();
if (!priv->mii_bus)
return -EIO;
+ if (of_machine_is_compatible("aspeed,ast2400") ||
+ of_machine_is_compatible("aspeed,ast2500")) {
+ /* Select the old MDIO interface; it is the only one this driver supports */
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
+ reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
+ }
+
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
@@ -1355,9 +1390,18 @@ static int ftgmac100_probe(struct platform_device *pdev)
FTGMAC100_INT_XPKT_ETH |
FTGMAC100_INT_XPKT_LOST |
FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_PHYSTS_CHG |
FTGMAC100_INT_RPKT_BUF |
FTGMAC100_INT_NO_RXBUF);
+
+ if (of_machine_is_compatible("aspeed,ast2400") ||
+ of_machine_is_compatible("aspeed,ast2500")) {
+ priv->rxdes0_edorr_mask = BIT(30);
+ priv->txdes0_edotr_mask = BIT(30);
+ } else {
+ priv->rxdes0_edorr_mask = BIT(15);
+ priv->txdes0_edotr_mask = BIT(15);
+ }
+
if (pdev->dev.of_node &&
of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
@@ -1367,7 +1411,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Using NCSI interface\n");
priv->use_ncsi = true;
- priv->int_mask_all &= ~FTGMAC100_INT_PHYSTS_CHG;
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
goto err_ncsi_dev;
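The page-tracking change above replaces `rxdes->rxdes2 = (unsigned int)page;`, which truncates pointers on 64-bit kernels, with a driver-private side table. The lookup arithmetic, shown in isolation:

	/* the descriptor's offset from the base of the ring array doubles
	 * as the index of its slot in the pointer-sized side table
	 */
	struct page **slot = &priv->rx_pages[rxdes - priv->descs->rxdes];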
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 13408d448b05..a7ce0ac8858a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -134,6 +134,11 @@
#define FTGMAC100_DMAFIFOS_TXDMA_REQ (1 << 31)
/*
+ * Feature Register
+ */
+#define FTGMAC100_REVR_NEW_MDIO_INTERFACE BIT(31)
+
+/*
* Receive buffer size register
*/
#define FTGMAC100_RBSR_SIZE(x) ((x) & 0x3fff)
@@ -152,6 +157,7 @@
#define FTGMAC100_MACCR_FULLDUP (1 << 8)
#define FTGMAC100_MACCR_GIGA_MODE (1 << 9)
#define FTGMAC100_MACCR_CRC_APD (1 << 10)
+#define FTGMAC100_MACCR_PHY_LINK_LEVEL (1 << 11)
#define FTGMAC100_MACCR_RX_RUNT (1 << 12)
#define FTGMAC100_MACCR_JUMBO_LF (1 << 13)
#define FTGMAC100_MACCR_RX_ALL (1 << 14)
@@ -189,7 +195,6 @@ struct ftgmac100_txdes {
} __attribute__ ((aligned(16)));
#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
-#define FTGMAC100_TXDES0_EDOTR (1 << 15)
#define FTGMAC100_TXDES0_CRC_ERR (1 << 19)
#define FTGMAC100_TXDES0_LTS (1 << 28)
#define FTGMAC100_TXDES0_FTS (1 << 29)
@@ -215,7 +220,6 @@ struct ftgmac100_rxdes {
} __attribute__ ((aligned(16)));
#define FTGMAC100_RXDES0_VDBC 0x3fff
-#define FTGMAC100_RXDES0_EDORR (1 << 15)
#define FTGMAC100_RXDES0_MULTICAST (1 << 16)
#define FTGMAC100_RXDES0_BROADCAST (1 << 17)
#define FTGMAC100_RXDES0_RX_ERR (1 << 18)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index fb5c63881340..48a033e64423 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -89,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
+ .driver_data = FEC_QUIRK_USE_GASKET,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_HAS_RACC,
+ .driver_data = 0,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -180,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* FEC receive acceleration */
#define FEC_RACC_IPDIS (1 << 1)
#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
/*
@@ -912,13 +913,11 @@ fec_restart(struct net_device *ndev)
* enet-mac reset will reset mac address registers too,
* so need to reconfigure it.
*/
- if (fep->quirks & FEC_QUIRK_ENET_MAC) {
- memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel((__force u32)cpu_to_be32(temp_mac[0]),
- fep->hwp + FEC_ADDR_LOW);
- writel((__force u32)cpu_to_be32(temp_mac[1]),
- fep->hwp + FEC_ADDR_HIGH);
- }
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel((__force u32)cpu_to_be32(temp_mac[0]),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((__force u32)cpu_to_be32(temp_mac[1]),
+ fep->hwp + FEC_ADDR_HIGH);
/* Clear any outstanding interrupt. */
writel(0xffffffff, fep->hwp + FEC_IEVENT);
@@ -945,9 +944,11 @@ fec_restart(struct net_device *ndev)
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC) {
- /* set RX checksum */
val = readl(fep->hwp + FEC_RACC);
+ /* align IP header */
+ val |= FEC_RACC_SHIFT16;
if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+ /* set RX checksum */
val |= FEC_RACC_OPTIONS;
else
val &= ~FEC_RACC_OPTIONS;
@@ -1428,6 +1429,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, pkt_len - 4);
data = skb->data;
+
+#if !defined(CONFIG_M5272)
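+ /* RACC SHIFT16 prepends two alignment bytes; strip them so the
+ * frame data starts at the Ethernet header again
+ */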
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ data = skb_pull_inline(skb, 2);
+#endif
+
if (!is_copybreak && need_swap)
swap_buffer(data, pkt_len);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 415ffa1509d5..39778892b3b3 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -600,7 +600,7 @@ static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
{
struct hip04_priv *priv;
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index ca68e2208b5b..ced185962ef8 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -940,8 +940,8 @@ static int hisi_femac_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-int hisi_femac_drv_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int hisi_femac_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hisi_femac_priv *priv = netdev_priv(ndev);
@@ -957,7 +957,7 @@ int hisi_femac_drv_suspend(struct platform_device *pdev,
return 0;
}
-int hisi_femac_drv_resume(struct platform_device *pdev)
+static int hisi_femac_drv_resume(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hisi_femac_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d7e1f8c7ae92..059aaeda46b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -994,10 +994,10 @@ static void hns_nic_adjust_link(struct net_device *ndev)
struct hnae_handle *h = priv->ae_handle;
int state = 1;
- if (priv->phy) {
+ if (ndev->phydev) {
h->dev->ops->adjust_link(h, ndev->phydev->speed,
ndev->phydev->duplex);
- state = priv->phy->link;
+ state = ndev->phydev->link;
}
state = state && h->dev->ops->get_status(h);
@@ -1022,7 +1022,6 @@ static void hns_nic_adjust_link(struct net_device *ndev)
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
- struct hns_nic_priv *priv = netdev_priv(ndev);
struct phy_device *phy_dev = h->phy_dev;
int ret;
@@ -1046,8 +1045,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
- priv->phy = phy_dev;
-
return 0;
}
@@ -1224,8 +1221,8 @@ static int hns_nic_net_up(struct net_device *ndev)
if (ret)
goto out_start_err;
- if (priv->phy)
- phy_start(priv->phy);
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
clear_bit(NIC_STATE_DOWN, &priv->state);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
@@ -1259,8 +1256,8 @@ static void hns_nic_net_down(struct net_device *ndev)
netif_tx_disable(ndev);
priv->link = 0;
- if (priv->phy)
- phy_stop(priv->phy);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
ops = priv->ae_handle->dev->ops;
@@ -1359,8 +1356,7 @@ static void hns_nic_net_timeout(struct net_device *ndev)
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
if (!netif_running(netdev))
return -EINVAL;
@@ -2017,9 +2013,8 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
hns_nic_uninit_ring_data(priv);
priv->ring_data = NULL;
- if (priv->phy)
- phy_disconnect(priv->phy);
- priv->phy = NULL;
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
if (!IS_ERR_OR_NULL(priv->ae_handle))
hnae_put_handle(priv->ae_handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 44bb3015eed3..5b412de350aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -59,7 +59,6 @@ struct hns_nic_priv {
u32 port_id;
int phy_mode;
int phy_led_val;
- struct phy_device *phy;
struct net_device *netdev;
struct device *dev;
struct hnae_handle *ae_handle;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 5eb3245bdf86..47e59bbfd061 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -48,9 +48,9 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
h = priv->ae_handle;
- if (priv->phy) {
- if (!genphy_read_status(priv->phy))
- link_stat = priv->phy->link;
+ if (net_dev->phydev) {
+ if (!genphy_read_status(net_dev->phydev))
+ link_stat = net_dev->phydev->link;
else
link_stat = 0;
}
@@ -64,15 +64,14 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
}
static void hns_get_mdix_mode(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
int mdix_ctrl, mdix, retval, is_resolved;
- struct hns_nic_priv *priv = netdev_priv(net_dev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = net_dev->phydev;
if (!phy_dev || !phy_dev->mdio.bus) {
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
return;
}
@@ -89,35 +88,35 @@ static void hns_get_mdix_mode(struct net_device *net_dev,
switch (mdix_ctrl) {
case 0x0:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
break;
case 0x1:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
break;
case 0x3:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
break;
default:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
break;
}
if (!is_resolved)
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
else if (mdix)
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
else
- cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
}
/**
- *hns_nic_get_settings - implement ethtool get settings
+ *hns_nic_get_link_ksettings - implement ethtool get link ksettings
*@net_dev: net_device
- *@cmd: ethtool_cmd
+ *@cmd: ethtool_link_ksettings
*return 0 - success, negative - fail
*/
-static int hns_nic_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int hns_nic_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_handle *h;
@@ -125,6 +124,7 @@ static int hns_nic_get_settings(struct net_device *net_dev,
int ret;
u8 duplex;
u16 speed;
+ u32 supported, advertising;
if (!priv || !priv->ae_handle)
return -ESRCH;
@@ -139,38 +139,43 @@ static int hns_nic_get_settings(struct net_device *net_dev,
return -EINVAL;
}
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
/* When there is no phy, autoneg is off. */
- cmd->autoneg = false;
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.autoneg = false;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
- if (priv->phy)
- (void)phy_ethtool_gset(priv->phy, cmd);
+ if (net_dev->phydev)
+ (void)phy_ethtool_ksettings_get(net_dev->phydev, cmd);
link_stat = hns_nic_get_link(net_dev);
if (!link_stat) {
- ethtool_cmd_speed_set(cmd, (u32)SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = (u32)SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- if (cmd->autoneg)
- cmd->advertising |= ADVERTISED_Autoneg;
+ if (cmd->base.autoneg)
+ advertising |= ADVERTISED_Autoneg;
- cmd->supported |= h->if_support;
+ supported |= h->if_support;
if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_1000baseT_Full;
} else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_10000baseKR_Full;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_10000baseKR_Full;
}
switch (h->media_type) {
case HNAE_MEDIA_TYPE_FIBER:
- cmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case HNAE_MEDIA_TYPE_COPPER:
- cmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
break;
case HNAE_MEDIA_TYPE_UNKNOWN:
default:
@@ -178,23 +183,27 @@ static int hns_nic_get_settings(struct net_device *net_dev,
}
if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
- cmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22;
hns_get_mdix_mode(net_dev, cmd);
return 0;
}
/**
- *hns_nic_set_settings - implement ethtool set settings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
*@net_dev: net_device
- *@cmd: ethtool_cmd
+ *@cmd: ethtool_link_ksettings
*return 0 - success, negative - fail
*/
-static int hns_nic_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int hns_nic_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_handle *h;
@@ -208,24 +217,25 @@ static int hns_nic_set_settings(struct net_device *net_dev,
return -ENODEV;
h = priv->ae_handle;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
- if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 ||
- cmd->duplex != DUPLEX_FULL)
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ speed != SPEED_10000 ||
+ cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
- if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE)
+ if (!net_dev->phydev && cmd->base.autoneg == AUTONEG_ENABLE)
return -EINVAL;
- if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF)
+ if (speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
- if (priv->phy)
- return phy_ethtool_sset(priv->phy, cmd);
+ if (net_dev->phydev)
+ return phy_ethtool_ksettings_set(net_dev->phydev, cmd);
if ((speed != SPEED_10 && speed != SPEED_100 &&
- speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL))
+ speed != SPEED_1000) || (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
} else {
netdev_err(net_dev, "Not supported!");
@@ -233,7 +243,7 @@ static int hns_nic_set_settings(struct net_device *net_dev,
}
if (h->dev->ops->adjust_link) {
- h->dev->ops->adjust_link(h, (int)speed, cmd->duplex);
+ h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
return 0;
}
@@ -305,7 +315,7 @@ static int __lb_setup(struct net_device *ndev,
{
int ret = 0;
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = ndev->phydev;
struct hnae_handle *h = priv->ae_handle;
switch (loop) {
@@ -910,7 +920,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
ETH_GSTRING_LEN);
buff += ETH_GSTRING_LEN;
- if ((priv->phy) && (!priv->phy->is_c45))
+ if ((netdev->phydev) && (!netdev->phydev->is_c45))
memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
ETH_GSTRING_LEN);
@@ -996,7 +1006,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
cnt--;
- if ((!priv->phy) || (priv->phy->is_c45))
+ if ((!netdev->phydev) || (netdev->phydev->is_c45))
cnt--;
return cnt;
@@ -1015,8 +1025,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
int hns_phy_led_set(struct net_device *netdev, int value)
{
int retval;
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
@@ -1039,7 +1048,7 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
int ret;
if (phy_dev)
@@ -1159,8 +1168,7 @@ static int hns_get_regs_len(struct net_device *net_dev)
static int hns_nic_nway_reset(struct net_device *netdev)
{
int ret = 0;
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy = priv->phy;
+ struct phy_device *phy = netdev->phydev;
if (netif_running(netdev)) {
if (phy)
@@ -1267,8 +1275,6 @@ static int hns_get_rxnfc(struct net_device *netdev,
static const struct ethtool_ops hns_ethtool_ops = {
.get_drvinfo = hns_nic_get_drvinfo,
.get_link = hns_nic_get_link,
- .get_settings = hns_nic_get_settings,
- .set_settings = hns_nic_set_settings,
.get_ringparam = hns_get_ringparam,
.get_pauseparam = hns_get_pauseparam,
.set_pauseparam = hns_set_pauseparam,
@@ -1288,6 +1294,8 @@ static const struct ethtool_ops hns_ethtool_ops = {
.get_rxfh = hns_get_rss,
.set_rxfh = hns_set_rss,
.get_rxnfc = hns_get_rxnfc,
+ .get_link_ksettings = hns_nic_get_link_ksettings,
+ .set_link_ksettings = hns_nic_set_link_ksettings,
};
void hns_ethtool_set_ops(struct net_device *ndev)
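The ksettings conversion above edits legacy SUPPORTED_*/ADVERTISED_* flags by round-tripping through u32 bitmaps. The pattern in isolation, using only the <linux/ethtool.h> helpers that appear in the hunks:

	u32 supported;

	/* snapshot the link-mode bitmap into a legacy u32 bitmask... */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	supported |= SUPPORTED_Pause;	/* edit with the legacy flags */
	/* ...and write the result back into the link-mode bitmap */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);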
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ec4d0f3b7850..8f139197f1aa 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
dev->mcast_pending = 1;
return;
}
+
+ mutex_lock(&dev->link_lock);
__emac_set_multicast_list(dev);
+ mutex_unlock(&dev->link_lock);
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *sa)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct sockaddr *addr = sa;
+ struct emac_regs __iomem *p = dev->emacp;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mutex_lock(&dev->link_lock);
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
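+ /* quiesce the MAC while the individual address registers are rewritten */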
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+ out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5]);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit,
.ndo_change_mtu = eth_change_mtu,
};
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit_sg,
.ndo_change_mtu = emac_change_mtu,
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 2e1b17ad52a3..ad03763e009a 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -334,7 +334,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
e_err("ptp_clock_register failed\n");
- } else {
+ } else if (adapter->ptp_clock) {
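+ /* ptp_clock_register() returns NULL when PHC support is disabled */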
e_info("registered PHC clock\n");
}
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 67ff01aeb11a..4d19e46f7c55 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -507,7 +507,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
- int vf_idx, u16 vid, u8 qos);
+ int vf_idx, u16 vid, u8 qos, __be16 vlan_proto);
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
int unused);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index adb7cb4311ba..5241e0873397 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1182,6 +1182,7 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_rxfh = fm10k_set_rssh,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void fm10k_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index d9dec81f6b6d..5f4dac0d36ef 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -445,7 +445,7 @@ int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
}
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
- u8 qos)
+ u8 qos, __be16 vlan_proto)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
@@ -460,6 +460,10 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
if (qos || (vid > (VLAN_VID_MASK - 1)))
return -EINVAL;
+ /* only the default 802.1Q VLAN protocol is supported */
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
vf_info = &iov_data->vf_info[vf_idx];
/* exit if there is nothing to do */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 0d39103124bd..5de937852436 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -56,7 +56,8 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, fm10k_driver_name);
+ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ fm10k_driver_name);
fm10k_dbg_init();
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 19103a6a7dcc..2030d7c1dc94 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -74,7 +74,7 @@
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
-#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
+#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
(((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
@@ -701,6 +701,8 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -708,8 +710,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add);
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 05cf9a719bab..0c1875b5b16d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1054,6 +1054,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
struct i40e_dcbx_config *r_cfg =
&pf->hw.remote_dcbx_config;
int i, ret;
+ u32 switch_id;
bw_data = kzalloc(sizeof(
struct i40e_aqc_query_port_ets_config_resp),
@@ -1063,8 +1064,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ vsi = pf->vsi[pf->lan_vsi];
+ switch_id =
+ vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK;
+
ret = i40e_aq_query_port_ets_config(&pf->hw,
- pf->mac_seid,
+ switch_id,
bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1425,84 +1430,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
kfree(desc);
desc = NULL;
- } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
- (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
- struct i40e_fdir_filter fd_data;
- u16 packet_len, i, j = 0;
- char *asc_packet;
- u8 *raw_packet;
- bool add = false;
- int ret;
-
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- if (strncmp(cmd_buf, "add", 3) == 0)
- add = true;
-
- if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
- if (!asc_packet)
- goto command_write_done;
-
- raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
-
- if (!raw_packet) {
- kfree(asc_packet);
- asc_packet = NULL;
- goto command_write_done;
- }
-
- cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
- &fd_data.q_index,
- &fd_data.flex_off, &fd_data.pctype,
- &fd_data.dest_vsi, &fd_data.dest_ctl,
- &fd_data.fd_status, &fd_data.cnt_index,
- &fd_data.fd_id, &packet_len, asc_packet);
- if (cnt != 10) {
- dev_info(&pf->pdev->dev,
- "program fd_filter: bad command string, cnt=%d\n",
- cnt);
- kfree(asc_packet);
- asc_packet = NULL;
- kfree(raw_packet);
- goto command_write_done;
- }
-
- /* fix packet length if user entered 0 */
- if (packet_len == 0)
- packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
-
- /* make sure to check the max as well */
- packet_len = min_t(u16,
- packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
-
- for (i = 0; i < packet_len; i++) {
- cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
- if (!cnt)
- break;
- j += 3;
- }
- dev_info(&pf->pdev->dev, "FD raw packet dump\n");
- print_hex_dump(KERN_INFO, "FD raw packet: ",
- DUMP_PREFIX_OFFSET, 16, 1,
- raw_packet, packet_len, true);
- ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
- if (!ret) {
- dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
- } else {
- dev_info(&pf->pdev->dev,
- "Filter command send failed %d\n", ret);
- }
- kfree(raw_packet);
- raw_packet = NULL;
- kfree(asc_packet);
- asc_packet = NULL;
} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
@@ -1727,8 +1654,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " globr\n");
dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
- dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
- dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd current cnt");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1835186b62c9..92bc8846f1ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1970,11 +1970,22 @@ static int i40e_set_phys_id(struct net_device *netdev,
* 125us (8000 interrupts per second) == ITR(62)
*/
+/**
+ * __i40e_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue interrupt coalescing settings. Specifically, Rx and
+ * Tx usecs are per queue. If queue is < 0, queue 0 is used as the
+ * representative value.
+ **/
static int __i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *rx_ring, *tx_ring;
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
@@ -1989,14 +2000,18 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return -EINVAL;
}
- if (ITR_IS_DYNAMIC(vsi->rx_rings[queue]->rx_itr_setting))
+ rx_ring = vsi->rx_rings[queue];
+ tx_ring = vsi->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_rings[queue]->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_rings[queue]->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_rings[queue]->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
@@ -2010,18 +2025,44 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_get_coalesce - get a netdev's coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ *
+ * Gets the coalesce settings for a particular netdev. Note that if the user
+ * has modified per-queue settings, this only guarantees to represent queue 0.
+ * See __i40e_get_coalesce for more details.
+ **/
static int i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, -1);
}
+/**
+ * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
+ * @netdev: netdev structure
+ * @ec: ethtool's coalesce settings
+ * @queue: the particular queue to read
+ *
+ * Will read a specific queue's coalesce settings
+ **/
static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, queue);
}
+/**
+ * i40e_set_itr_per_queue - set ITR values for specific queue
+ * @vsi: the VSI to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
@@ -2060,6 +2101,14 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
i40e_flush(hw);
}
+/**
+ * __i40e_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
static int __i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
@@ -2120,12 +2169,27 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_set_coalesce - set coalesce settings for every queue on the netdev
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ *
+ * This will set each queue to the same coalesce settings.
+ **/
static int i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_set_coalesce(netdev, ec, -1);
}
+/**
+ * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the specified queue's coalesce settings.
+ **/
static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
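These helpers share one convention: a negative queue index means "queue 0 is the representative" on the get side and "apply to every queue" on the set side, while any other index must be below the VSI's queue-pair count. The get-side check, sketched standalone (the helper name is illustrative, not a driver symbol):

	static int example_pick_queue(int queue, u16 num_queue_pairs)
	{
		if (queue < 0)
			return 0;		/* representative queue */
		if (queue >= num_queue_pairs)
			return -EINVAL;		/* out of range */
		return queue;
	}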
@@ -2922,15 +2986,13 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
u8 *seed = NULL;
u16 i;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
- return 0;
-
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
@@ -2948,8 +3010,12 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ if (indir)
+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+ vsi->rss_lut_user[i] = (u8)(indir[i]);
+ else
+ i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
+ vsi->rss_size);
return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
I40E_HLUT_ARRAY_SIZE);
@@ -3019,6 +3085,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
} else {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+
+ /* flush current ATR settings */
+ set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
}
if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 61b0fc433d37..ac1faee2a5b8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 12
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -57,8 +57,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -1317,7 +1315,7 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
element.vlan_tag = 0;
/* ...and some firmware does it this way. */
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
- I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
@@ -1910,7 +1908,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
if (f->vlan == I40E_VLAN_ANY) {
del_list[num_del].vlan_tag = 0;
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
} else {
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan));
@@ -5244,7 +5242,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
@@ -5941,13 +5939,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
- /* Wait for some more space to be available to turn on ATR */
+
+ /* Wait for some more space to be available to turn on ATR. We also
+ * must check that no existing ntuple rules for TCP are in effect
+ */
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->fd_tcp_rule == 0)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
@@ -5978,9 +5980,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
int fd_room;
int reg;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (!time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
return;
@@ -6000,7 +5999,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6020,7 +6019,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr)
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -6054,9 +6053,6 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->state))
return;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
@@ -7156,9 +7152,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
pf->pending_udp_bitmap &= ~BIT_ULL(i);
port = pf->udp_ports[i].index;
if (port)
- ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
- pf->udp_ports[i].type,
- NULL, NULL);
+ ret = i40e_aq_add_udp_tunnel(hw, port,
+ pf->udp_ports[i].type,
+ NULL, NULL);
else
ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
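This hunk drops an ntohs() so the stored tunnel port is passed through unmodified: the value was being byte-swapped one time too many on its way to the firmware. A generic, runnable illustration of the double-swap pitfall (not the actual i40e call chain):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        uint16_t vxlan = 4789;            /* host order: 0x12b5 */
        uint16_t wire  = htons(vxlan);    /* stored in network order */

        /* A consumer that expects the stored (network-order) value: */
        uint16_t correct = wire;          /* pass straight through */
        uint16_t buggy   = ntohs(wire);   /* one swap too many */

        printf("expected 0x%04x, buggy path sends 0x%04x\n",
               correct, buggy);           /* differ on little-endian */
        return 0;
}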
@@ -7645,7 +7641,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left--;
} else {
pf->num_fdsb_msix = 0;
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
}
}
@@ -7665,6 +7660,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
#endif
/* can we reserve enough for iWARP? */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ iwarp_requested = pf->num_iwarp_msix;
+
if (!vectors_left)
pf->num_iwarp_msix = 0;
else if (vectors_left < pf->num_iwarp_msix)
@@ -7678,18 +7675,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
- /* if we're short on vectors for what's desired, we limit
- * the queues per vmdq. If this is still more than are
- * available, the user will need to change the number of
- * queues/vectors used by the PF later with the ethtool
- * channels command
- */
- if (vmdq_vecs < vmdq_vecs_wanted)
- pf->num_vmdq_qps = 1;
- pf->num_vmdq_msix = pf->num_vmdq_qps;
+ if (!vectors_left) {
+ pf->num_vmdq_msix = 0;
+ pf->num_vmdq_qps = 0;
+ } else {
+ /* if we're short on vectors for what's desired, we limit
+ * the queues per vmdq. If this is still more than are
+ * available, the user will need to change the number of
+ * queues/vectors used by the PF later with the ethtool
+ * channels command
+ */
+ if (vmdq_vecs < vmdq_vecs_wanted)
+ pf->num_vmdq_qps = 1;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
- v_budget += vmdq_vecs;
- vectors_left -= vmdq_vecs;
+ v_budget += vmdq_vecs;
+ vectors_left -= vmdq_vecs;
+ }
}
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
@@ -7701,21 +7703,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->msix_entries[i].entry = i;
v_actual = i40e_reserve_msix_vectors(pf, v_budget);
- if (v_actual != v_budget) {
- /* If we have limited resources, we will start with no vectors
- * for the special features and then allocate vectors to some
- * of these features based on the policy and at the end disable
- * the features that did not get any vectors.
- */
- iwarp_requested = pf->num_iwarp_msix;
- pf->num_iwarp_msix = 0;
-#ifdef I40E_FCOE
- pf->num_fcoe_qps = 0;
- pf->num_fcoe_msix = 0;
-#endif
- pf->num_vmdq_msix = 0;
- }
-
if (v_actual < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
@@ -7729,9 +7716,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
- } else if (v_actual != v_budget) {
+ } else if (!vectors_left) {
+ /* If we have limited resources, we will start with no vectors
+ * for the special features and then allocate vectors to some
+ * of these features based on the policy and at the end disable
+ * the features that did not get any vectors.
+ */
int vec;
+ dev_info(&pf->pdev->dev,
+ "MSI-X vector limit reached, attempting to redistribute vectors\n");
/* reserve the misc vector */
vec = v_actual - 1;
@@ -7739,7 +7733,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
pf->num_vmdq_qps = 1;
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+#ifdef I40E_FCOE
+ pf->num_fcoe_qps = 0;
+ pf->num_fcoe_msix = 0;
+#endif
/* partition out the remaining vectors */
switch (vec) {
@@ -7771,9 +7768,14 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_vsis = min_t(int, (vec / 2),
I40E_DEFAULT_NUM_VMDQ_VSI);
}
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ pf->num_fdsb_msix = 1;
+ vec--;
+ }
pf->num_lan_msix = min_t(int,
(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
pf->num_lan_msix);
+ pf->num_lan_qps = pf->num_lan_msix;
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -7785,6 +7787,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (pf->num_fdsb_msix == 0)) {
+ dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ }
if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
(pf->num_vmdq_msix == 0)) {
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
@@ -7803,6 +7810,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
}
#endif
+ i40e_debug(&pf->hw, I40E_DEBUG_INIT,
+ "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
+ pf->num_lan_msix,
+ pf->num_vmdq_msix * pf->num_vmdq_vsis,
+ pf->num_fdsb_msix,
+ pf->num_iwarp_msix);
+
return v_actual;
}
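The reworked flow above defers feature shutdown until after i40e_reserve_msix_vectors() reports what was actually granted, then redistributes: the misc vector first, then LAN, one vector for sideband flow director if any remain, and VMDq/iWARP from the leftovers, disabling at the end whatever received none. A toy budgeter in the same priority-order spirit (the numbers and priorities here are illustrative only, not the driver's policy table):

#include <stdio.h>

int main(void)
{
        int vectors_left = 8;             /* pretend grant from the OS */
        int misc = 1, lan, fdsb, vmdq;

        vectors_left -= misc;                      /* misc vector first */
        lan  = vectors_left > 4 ? 4 : vectors_left;
        vectors_left -= lan;
        fdsb = vectors_left ? 1 : 0;               /* sideband needs one */
        vectors_left -= fdsb;
        vmdq = vectors_left;                       /* whatever remains */

        printf("misc=%d lan=%d fdsb=%d vmdq=%d\n", misc, lan, fdsb, vmdq);
        return 0;
}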
@@ -8244,8 +8258,8 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
* @rss_table_size: Lookup table size
* @rss_size: Range of queue number for hashing
*/
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size)
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size)
{
u16 i;
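The function body is elided by the hunk, but the kernel-doc above ("Range of queue number for hashing") matches the usual round-robin fill, where every lookup-table slot points at one of rss_size queues in rotation. A hedged, standalone re-creation of that behavior (not the driver's exact body):

#include <stdio.h>

static void fill_rss_lut(unsigned char *lut, unsigned int table_size,
                         unsigned int rss_size)
{
        for (unsigned int i = 0; i < table_size; i++)
                lut[i] = i % rss_size;    /* queue index for slot i */
}

int main(void)
{
        unsigned char lut[16];

        fill_rss_lut(lut, 16, 4);         /* 16-entry table, 4 queues */
        for (int i = 0; i < 16; i++)
                printf("%u ", lut[i]);    /* 0 1 2 3 0 1 2 3 ... */
        printf("\n");
        return 0;
}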
@@ -8286,6 +8300,8 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -8610,7 +8626,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_WB_ON_ITR_CAPABLE |
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
I40E_FLAG_NO_PCI_LINK_CHECK |
- I40E_FLAG_100M_SGMII_CAPABLE |
I40E_FLAG_USE_SET_LLDP_MIB |
I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
@@ -8685,13 +8700,13 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ }
}
return need_reset;
}
@@ -11338,11 +11353,7 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown the adminq */
- ret_code = i40e_shutdown_adminq(hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the Admin Queue resources: %d\n",
- ret_code);
+ i40e_shutdown_adminq(hw);
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
@@ -11389,6 +11400,12 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+ if (!pf) {
+ dev_info(&pdev->dev,
+ "Cannot recover - error happened during device probe\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
rtnl_lock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ed39cbad24bd..f1feceab758a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -669,7 +669,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
pf->ptp_clock = NULL;
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
- } else {
+ } else if (pf->ptp_clock) {
struct timespec64 ts;
u32 regval;
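Both this i40e hunk and the igb one further down guard for ptp_clock_register() returning NULL: the function returns an ERR_PTR() on a real failure, but NULL when PTP clock support is compiled out (CONFIG_PTP_1588_CLOCK=n), and the NULL case must not be treated as a registered clock. A userspace mock of the three-way pattern (the MOCK_* names are invented; the kernel's versions live in linux/err.h):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MOCK_ERR_PTR(err)  ((void *)(intptr_t)(err))
#define MOCK_IS_ERR(p)     ((uintptr_t)(p) >= (uintptr_t)-4095)

static void handle(void *clock)
{
        if (MOCK_IS_ERR(clock))
                printf("hard failure: %ld\n", (long)(intptr_t)clock);
        else if (clock)
                printf("clock registered\n");
        else
                printf("PHC support compiled out: no clock, not an error\n");
}

int main(void)
{
        handle(MOCK_ERR_PTR(-ENOMEM));  /* registration failed */
        handle(&(int){0});              /* valid clock pointer */
        handle(NULL);                   /* PTP disabled in kconfig */
        return 0;
}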
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f8d66236fcbf..6287bf63c43c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -40,6 +40,69 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
}
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_fdir - Generate a Flow Director descriptor based on fdata
+ * @tx_ring: Tx ring to send buffer on
+ * @fdata: Flow director filter data
+ * @add: Indicate if we are adding a rule or deleting one
+ *
+ **/
+static void i40e_fdir(struct i40e_ring *tx_ring,
+ struct i40e_fdir_filter *fdata, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ u32 flex_ptype, dtype_cmd;
+ u16 i;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
+ (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+ (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
+ (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ /* Use LAN VSI Id if not programmed by user */
+ flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
+ ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= add ?
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT :
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
+ (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
+ (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+
+ if (fdata->cnt_index) {
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
+ ((u32)fdata->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ }
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->rsvd = cpu_to_le32(0);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+ fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
+}
+
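Two idioms in the new i40e_fdir() above deserve a note: `x ? : y` is the GNU "elvis" extension (yield x unless it is zero, else y), replacing the old if/else for the destination VSI, and every field is composed as MASK & (value << SHIFT), so an oversized value can never bleed into a neighboring descriptor field. A hedged standalone demo (the DEMO_* mask values are made up; the real ones are the I40E_TXD_FLTR_* definitions):

#include <stdio.h>

#define DEMO_QINDEX_SHIFT 0
#define DEMO_QINDEX_MASK  (0x7FFu << DEMO_QINDEX_SHIFT)

int main(void)
{
        unsigned int user_vsi = 0, default_vsi = 5;

        /* GNU "elvis" operator: a ?: b  ==  a ? a : b */
        unsigned int vsi = user_vsi ?: default_vsi;
        printf("vsi = %u\n", vsi);                     /* 5 */

        /* mask-and-shift: an oversized q_index cannot corrupt
         * neighboring descriptor fields
         */
        unsigned int q_index = 0xFFFF;                 /* too wide */
        unsigned int field = DEMO_QINDEX_MASK &
                             (q_index << DEMO_QINDEX_SHIFT);
        printf("field = 0x%x\n", field);               /* 0x7ff */
        return 0;
}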
#define I40E_FD_CLEAN_DELAY 10
/**
* i40e_program_fdir_filter - Program a Flow Director filter
@@ -48,14 +111,13 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
* @pf: The PF pointer
* @add: True for add/update, False for remove
**/
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add)
+static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
+ u8 *raw_packet, struct i40e_pf *pf,
+ bool add)
{
- struct i40e_filter_program_desc *fdir_desc;
struct i40e_tx_buffer *tx_buf, *first;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
- unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -92,56 +154,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* grab the next descriptor */
i = tx_ring->next_to_use;
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
first = &tx_ring->tx_bi[i];
- memset(first, 0, sizeof(struct i40e_tx_buffer));
-
- tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
-
- fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
-
- fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
- I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
-
- fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
- I40E_TXD_FLTR_QW0_PCTYPE_MASK;
-
- /* Use LAN VSI Id if not programmed by user */
- if (fdir_data->dest_vsi == 0)
- fpt |= (pf->vsi[pf->lan_vsi]->id) <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
- else
- fpt |= ((u32)fdir_data->dest_vsi <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
- I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
-
- dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
-
- if (add)
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- else
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
-
- dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
- I40E_TXD_FLTR_QW1_DEST_MASK;
-
- dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
- I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
-
- if (fdir_data->cnt_index != 0) {
- dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dcc |= ((u32)fdir_data->cnt_index <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- }
-
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
- fdir_desc->rsvd = cpu_to_le32(0);
- fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
- fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+ i40e_fdir(tx_ring, fdir_data, add);
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
@@ -282,18 +296,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
}
@@ -532,7 +546,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -545,9 +562,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -584,8 +598,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -754,8 +767,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1864,6 +1877,15 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
+static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ return !!(vsi->rx_rings[idx]->rx_itr_setting);
+}
+
+static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ return !!(vsi->tx_rings[idx]->tx_itr_setting);
+}
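These helpers read the per-ring *_itr_setting words, which pack the user's microsecond value in the low bits with an "adaptive" flag in the high bit (the comment block carried into i40evf's i40e_txrx.h later in this patch spells the convention out). Assuming the customary i40e encoding with the dynamic flag at 0x8000, a small demo of the encode/decode:

#include <stdio.h>

/* Assumed values mirroring the i40e convention; verify against the
 * driver headers before relying on them.
 */
#define ITR_DYNAMIC   0x8000u
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ITR_DYNAMIC))

int main(void)
{
        unsigned short setting = 62 | ITR_DYNAMIC;  /* 62us, adaptive */

        printf("usecs   : %u\n", setting & ~ITR_DYNAMIC);  /* 62 */
        printf("adaptive: %d\n", ITR_IS_DYNAMIC(setting)); /* 1 */

        setting &= ~ITR_DYNAMIC;                    /* pin to fixed 62us */
        printf("adaptive: %d\n", ITR_IS_DYNAMIC(setting)); /* 0 */
        return 0;
}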
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1879,6 +1901,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
u32 rxval, txval;
int vector;
int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1887,18 +1910,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr_enabled(vsi, idx);
+ tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -2621,9 +2647,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
@@ -2654,8 +2678,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
@@ -2787,9 +2810,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2814,13 +2835,11 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
+ !netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
+ !netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b78c810d1835..508840585645 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -463,4 +463,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
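The new txring_txq() helper replaces the repeated netdev_get_tx_queue(ring->netdev, ring->queue_index) lookups that feed the byte-queue-limit (BQL) hooks throughout this patch. The discipline BQL imposes is symmetric accounting: bytes reported by netdev_tx_sent_queue() must later be matched by netdev_tx_completed_queue(), or wiped by netdev_tx_reset_queue() when the ring is torn down. A toy userspace model of that bookkeeping (names invented; only the accounting mirrors the kernel hooks):

#include <stdio.h>

struct toy_txq { unsigned long in_flight_bytes; };

static void toy_sent(struct toy_txq *q, unsigned int bytes)
{
        q->in_flight_bytes += bytes;      /* netdev_tx_sent_queue() */
}

static void toy_completed(struct toy_txq *q, unsigned int bytes)
{
        q->in_flight_bytes -= bytes;      /* netdev_tx_completed_queue() */
}

static void toy_reset(struct toy_txq *q)
{
        q->in_flight_bytes = 0;           /* netdev_tx_reset_queue() */
}

int main(void)
{
        struct toy_txq q = { 0 };

        toy_sent(&q, 1514);
        toy_sent(&q, 60);
        toy_completed(&q, 1514);
        printf("in flight: %lu\n", q.in_flight_bytes);  /* 60 */
        toy_reset(&q);                    /* ring teardown path */
        printf("in flight: %lu\n", q.in_flight_bytes);  /* 0 */
        return 0;
}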
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index c92a3bdee229..f861d3109d1a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -163,6 +163,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index da3423561b3a..54b8ee2583f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -502,8 +502,16 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
u32 qtx_ctl;
int ret = 0;
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ ret = -ENOENT;
+ goto error_context;
+ }
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!vsi) {
+ ret = -ENOENT;
+ goto error_context;
+ }
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -1476,7 +1484,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+ !vsi) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2217,8 +2226,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP :
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
aq_ret);
}
@@ -2747,11 +2756,12 @@ error_param:
* @vf_id: VF identifier
* @vlan_id: VLAN identifier
* @qos: priority setting
+ * @vlan_proto: vlan protocol
*
* program VF vlan id and/or qos
**/
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
- int vf_id, u16 vlan_id, u8 qos)
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto)
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -2774,6 +2784,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ ret = -EPROTONOSUPPORT;
+ goto error_pvid;
+ }
+
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 875174141451..4012d069939a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -129,8 +129,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* VF configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
- int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 4db0c0326185..7953c13451b9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -302,7 +302,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = le16_to_cpu(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
u16 i = 0;
@@ -326,6 +325,8 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ u16 len = le16_to_cpu(aq_desc->datalen);
+
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 0130458264e5..75f2a2cdd738 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -51,7 +51,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -64,9 +67,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -103,8 +103,7 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -273,8 +272,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1312,6 +1311,19 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
+static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+
+ return !!(adapter->rx_rings[idx].rx_itr_setting);
+}
+
+static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+
+ return !!(adapter->tx_rings[idx].tx_itr_setting);
+}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1326,6 +1338,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
+ int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1334,18 +1348,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr_enabled(vsi, idx);
+ tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -1832,9 +1849,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
@@ -1865,8 +1880,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
@@ -2015,9 +2029,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2042,13 +2054,11 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
+ !netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
+ !netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0112277e5882..abcdecabbc56 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -287,6 +287,14 @@ struct i40e_ring {
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_buf_len;
@@ -445,4 +453,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index f04ce6cb70dc..bd691ad86673 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -160,6 +160,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index dc00aaf94687..c5fd724313c7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -59,13 +59,6 @@ struct i40e_vsi {
unsigned long state;
int base_vector;
u16 work_limit;
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware only supports 2us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
u16 qs_handle;
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index e17a15456266..a9940154eead 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -296,93 +296,207 @@ static int i40evf_set_ringparam(struct net_device *netdev,
}
/**
- * i40evf_get_coalesce - Get interrupt coalescing settings
- * @netdev: network interface device structure
- * @ec: ethtool coalesce structure
+ * __i40evf_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
*
- * Returns current coalescing settings. This is referred to elsewhere in the
- * driver as Interrupt Throttle Rate, as this is how the hardware describes
- * this functionality.
+ * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
+ * are per queue. If queue is <0 then we default to queue 0 as the
+ * representative value.
**/
-static int i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int __i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_ring *rx_ring, *tx_ring;
ec->tx_max_coalesced_frames = vsi->work_limit;
ec->rx_max_coalesced_frames = vsi->work_limit;
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ /* Rx and Tx usecs are per-queue values. If the user doesn't specify
+ * a queue, return queue 0's value as the representative.
+ */
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= adapter->num_active_queues)
+ return -EINVAL;
+
+ rx_ring = &adapter->rx_rings[queue];
+ tx_ring = &adapter->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
/**
- * i40evf_set_coalesce - Set interrupt coalescing settings
+ * i40evf_get_coalesce - Get interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
*
- * Change current coalescing settings.
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality. Note that if per-queue settings have been modified, this
+ * only represents the settings of queue 0.
**/
-static int i40evf_set_coalesce(struct net_device *netdev,
+static int i40evf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_hw *hw = &adapter->hw;
+ return __i40evf_get_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_get_per_queue_coalesce - get coalesce values for specific queue
+ * @netdev: netdev to read
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to read
+ *
+ * Read specific queue's coalesce settings.
+ **/
+static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_get_coalesce(netdev, ec, queue);
+}
+
+/**
+ * i40evf_set_itr_per_queue - set ITR values for specific queue
+ * @adapter: the VF adapter to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
+ u16 vector;
+
+ adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
+ adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (ec->use_adaptive_rx_coalesce)
+ adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ q_vector = adapter->rx_rings[queue].q_vector;
+ q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+
+ q_vector = adapter->tx_rings[queue].q_vector;
+ q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+
+ i40e_flush(hw);
+}
+
+/**
+ * __i40evf_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
+static int __i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-
- else
+ if (ec->rx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_rx_coalesce)
+ netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+ } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
+ }
- if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- else if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- else
+ if (ec->tx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_tx_coalesce)
+ netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+ } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
+ }
- if (ec->use_adaptive_rx_coalesce)
- vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
- q_vector = &adapter->q_vectors[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
- i40e_flush(hw);
+ /* Rx and Tx usecs are per-queue values. If the user doesn't specify
+ * a queue, apply to all queues.
+ */
+ if (queue < 0) {
+ for (i = 0; i < adapter->num_active_queues; i++)
+ i40evf_set_itr_per_queue(adapter, ec, i);
+ } else if (queue < adapter->num_active_queues) {
+ i40evf_set_itr_per_queue(adapter, ec, queue);
+ } else {
+ netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+ adapter->num_active_queues - 1);
+ return -EINVAL;
}
return 0;
}
/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings for every queue.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_set_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to modify
+ *
+ * Modifies a specific queue's coalesce settings.
+ **/
+static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_set_coalesce(netdev, ec, queue);
+}
+
+/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -533,6 +647,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
+ .set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f751f7bc0d81..14372810fc27 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 12
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -370,6 +370,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
+ struct i40e_hw *hw = &adapter->hw;
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
@@ -377,7 +378,10 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
+ q_vector->ring_mask |= BIT(r_idx);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
}
/**
@@ -391,6 +395,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
+ struct i40e_hw *hw = &adapter->hw;
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
@@ -398,9 +403,10 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++;
- q_vector->ring_mask |= BIT(t_idx);
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
}
/**
@@ -1007,7 +1013,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
**/
-static int i40evf_up_complete(struct i40evf_adapter *adapter)
+static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
adapter->state = __I40EVF_RUNNING;
clear_bit(__I40E_DOWN, &adapter->vsi.state);
@@ -1016,7 +1022,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- return 0;
}
/**
@@ -1037,6 +1042,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
@@ -1154,6 +1160,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
+ tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
@@ -1162,6 +1169,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
+ rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
return 0;
@@ -1731,6 +1739,7 @@ static void i40evf_reset_task(struct work_struct *work)
set_bit(__I40E_DOWN, &adapter->vsi.state);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
i40evf_free_traffic_irqs(adapter);
@@ -1769,6 +1778,7 @@ continue_reset:
if (netif_running(adapter->netdev)) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
}
i40evf_irq_disable(adapter);
@@ -1783,8 +1793,7 @@ continue_reset:
i40evf_free_all_tx_resources(adapter);
/* kill and reinit the admin queue */
- if (i40evf_shutdown_adminq(hw))
- dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n");
+ i40evf_shutdown_adminq(hw);
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw);
if (err)
@@ -1824,9 +1833,7 @@ continue_reset:
i40evf_configure(adapter);
- err = i40evf_up_complete(adapter);
- if (err)
- goto reset_err;
+ i40evf_up_complete(adapter);
i40evf_irq_enable(adapter, true);
} else {
@@ -2056,9 +2063,7 @@ static int i40evf_open(struct net_device *netdev)
i40evf_add_filter(adapter, adapter->hw.mac.addr);
i40evf_configure(adapter);
- err = i40evf_up_complete(adapter);
- if (err)
- goto err_req_irq;
+ i40evf_up_complete(adapter);
i40evf_irq_enable(adapter, true);
@@ -2272,10 +2277,6 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2457,6 +2458,7 @@ static void i40evf_init_task(struct work_struct *work)
goto err_sw_init;
netif_carrier_off(netdev);
+ adapter->link_up = false;
if (!adapter->netdev_registered) {
err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index cc6cb30c1667..ddf478d6322b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -898,8 +898,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
vpe->event_data.link_event.link_status) {
adapter->link_up =
vpe->event_data.link_event.link_status;
+ if (adapter->link_up) {
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
i40evf_print_link_message(adapter);
- netif_tx_stop_all_queues(netdev);
}
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
@@ -974,8 +980,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
- netif_tx_start_all_queues(adapter->netdev);
- netif_carrier_on(adapter->netdev);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
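Moving netif_carrier_on()/netif_tx_start_all_queues() out of the ENABLE_QUEUES completion and into the link-event handler ties the stack's view of the link to what the PF actually reports, and the new adapter->link_up = false assignments in the down/reset paths keep a stale "up" from surviving a reset. A toy sketch of that state machine (the printfs stand in for the carrier and queue start/stop calls):

#include <stdio.h>
#include <stdbool.h>

struct toy_adapter { bool link_up; };

static void link_event(struct toy_adapter *a, bool pf_says_up)
{
        a->link_up = pf_says_up;
        printf(pf_says_up ? "carrier on, queues started\n"
                          : "carrier off, queues stopped\n");
}

static void reset_task(struct toy_adapter *a)
{
        /* mirror the hunks above: a reset always forces link down
         * until the PF reports otherwise
         */
        a->link_up = false;
        printf("reset: carrier off\n");
}

int main(void)
{
        struct toy_adapter a = { false };

        link_event(&a, true);     /* PF reports link up */
        reset_task(&a);           /* VF reset in progress */
        link_event(&a, true);     /* link restored after reset */
        return 0;
}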
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 03fbe4b7663b..d11093dce1b9 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -489,6 +489,7 @@ struct igb_adapter {
struct timecounter tc;
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
+ bool pps_sys_wrap_on;
struct ptp_pin_desc sdp_config[IGB_N_SDP];
struct {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 0c33eca7c832..737b664d004c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2704,8 +2704,8 @@ static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter,
return 0;
}
-int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
- struct igb_nfc_filter *input)
+static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
+ struct igb_nfc_filter *input)
{
struct e1000_hw *hw = &adapter->hw;
u8 vlan_priority;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index af75eac5fa16..edc9a6ac5169 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -58,7 +58,7 @@
#include "igb.h"
#define MAJ 5
-#define MIN 3
+#define MIN 4
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
@@ -169,7 +169,7 @@ static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos);
+ int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
bool setting);
@@ -6222,14 +6222,17 @@ static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
return 0;
}
-static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
{
struct igb_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
igb_disable_port_vlan(adapter, vf);
}
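The ndo_set_vf_vlan prototype change threaded through this patch adds a __be16 vlan_proto argument so drivers can refuse VLAN protocols they cannot offload; both the igb hunk above and the i40e one earlier accept only 802.1Q and return -EPROTONOSUPPORT for anything else, notably 802.1ad (QinQ) outer tags. A standalone version of the check:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_8021Q  0x8100   /* 802.1Q C-tag */
#define ETH_P_8021AD 0x88A8   /* 802.1ad S-tag (QinQ outer) */

/* Mirrors the driver check: only plain 802.1Q port VLANs supported */
static int set_vf_vlan(uint16_t vlan_proto_be)
{
        if (vlan_proto_be != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;
        return 0;
}

int main(void)
{
        printf("8021Q : %d\n", set_vf_vlan(htons(ETH_P_8021Q)));  /* 0 */
        printf("8021AD: %d\n", set_vf_vlan(htons(ETH_P_8021AD))); /* rejected */
        return 0;
}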
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 66dfa2085cc7..a7895c4cbcc3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -591,6 +591,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
tsim |= TSINTR_SYS_WRAP;
else
tsim &= ~TSINTR_SYS_WRAP;
+ igb->pps_sys_wrap_on = !!on;
wr32(E1000_TSIM, tsim);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
return 0;
@@ -1159,7 +1160,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
- } else {
+ } else if (adapter->ptp_clock) {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
adapter->ptp_flags |= IGB_PTP_ENABLED;
@@ -1235,7 +1236,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
case e1000_i211:
wr32(E1000_TSAUXC, 0x0);
wr32(E1000_TSSDP, 0x0);
- wr32(E1000_TSIM, TSYNC_INTERRUPTS);
+ wr32(E1000_TSIM,
+ TSYNC_INTERRUPTS |
+ (adapter->pps_sys_wrap_on ? TSINTR_SYS_WRAP : 0));
wr32(E1000_IMS, E1000_IMS_TS);
break;
default:
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b0778ba65083..12bb877df860 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -47,7 +47,7 @@
#include "igbvf.h"
-#define DRV_VERSION "2.0.2-k"
+#define DRV_VERSION "2.4.0-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9547191e26c9..f49f80380aa5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -313,6 +313,25 @@ static int ixgbe_get_settings(struct net_device *netdev,
break;
}
+ /* Indicate pause support */
+ ecmd->supported |= SUPPORTED_Pause;
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ ecmd->advertising |= ADVERTISED_Pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ ecmd->advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ break;
+ case ixgbe_fc_tx_pause:
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ break;
+ default:
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ }
+
if (netif_carrier_ok(netdev)) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
@@ -2928,9 +2947,13 @@ static u32 ixgbe_rss_indir_size(struct net_device *netdev)
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
+ u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
for (i = 0; i < reta_size; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
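
With SR-IOV enabled the PF no longer owns the full indirection-table range, so the entries reported to ethtool are clamped with the per-pool queue mask. A quick illustration of the masking; the table contents here are made up:

#include <stdio.h>

int main(void)
{
	unsigned int rss_indir_tbl[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned int rss_m = 2 - 1;	/* RING_F_RSS.indices - 1, e.g. 2 queues */
	int i;

	for (i = 0; i < 8; i++)
		printf("%u ", rss_indir_tbl[i] & rss_m);
	printf("\n");			/* 0 1 0 1 0 1 0 1 */
	return 0;
}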
@@ -3041,8 +3064,8 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
/* We only support one q_vector without MSI-X */
max_combined = 1;
} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- /* SR-IOV currently only allows one queue on the PF */
- max_combined = 1;
+ /* Limit value based on the queue mask */
+ max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
} else if (tcs > 1) {
/* For DCB report channels per traffic class */
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index bcdc88444ceb..15ab337fd7ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -515,15 +515,16 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
+ if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
- /* 32 pool mode with 4 queues per pool */
+ /* 32 pool mode with up to 4 queues per pool */
} else {
vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
rss_m = IXGBE_RSS_4Q_MASK;
- rss_i = 4;
+ /* We can support 4, 2, or 1 queues */
+ rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
}
#ifdef IXGBE_FCOE
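
In the 32-pool VMDq layout each pool may use 4, 2 or 1 RSS queues, so the requested count is now rounded down to the nearest supported value instead of being forced to 4. A sketch of that rounding:

#include <stdio.h>

/* mirror of the rounding above: only 4, 2 or 1 queues per pool */
static unsigned short rss_queues(unsigned short rss_i)
{
	return (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
}

int main(void)
{
	unsigned short i;

	for (i = 1; i <= 8; i++)
		printf("requested %u -> %u\n", i, rss_queues(i));
	return 0;
}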
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d76bc1a313ea..a244d9a67264 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3248,7 +3248,8 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
else if (tcs > 1)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
- else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ else if (adapter->ring_feature[RING_F_VMDQ].mask ==
+ IXGBE_82599_VMDQ_4Q_MASK)
mtqc |= IXGBE_MTQC_32VF;
else
mtqc |= IXGBE_MTQC_64VF;
@@ -3475,12 +3476,12 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- /* Program table for at least 2 queues w/ SR-IOV so that VFs can
+ /* Program table for at least 4 queues w/ SR-IOV so that VFs can
* make full use of any rings they may have. We will use the
* PSRTYPE register to control how many rings we use within the PF.
*/
- if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
- rss_i = 2;
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
+ rss_i = 4;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -3544,7 +3545,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
else if (tcs > 1)
mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
- else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ else if (adapter->ring_feature[RING_F_VMDQ].mask ==
+ IXGBE_82599_VMDQ_4Q_MASK)
mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
@@ -4105,23 +4107,20 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- default:
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- break;
- /* fall through */
- case ixgbe_mac_82598EB:
- /* legacy case, we can just disable VLAN filtering */
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ } else {
vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
+ /* Nothing to do for 82598 */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
/* We are already in VLAN promisc, nothing to do */
if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
return;
@@ -4129,10 +4128,6 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* Set flag so we don't redo unnecessary work */
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
- /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-
/* Add PF to all active pools */
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4204,19 +4199,9 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- default:
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- break;
- /* fall through */
- case ixgbe_mac_82598EB:
+ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
+ hw->mac.type == ixgbe_mac_82598EB)
return;
- }
/* We are not in VLAN promisc, nothing to do */
if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index db0731e05401..021ab9b89c71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -346,8 +346,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
return 0;
}
}
- /* clear value if nothing found */
- hw->phy.mdio.prtad = 0;
+ /* indicate no PHY found */
+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
return IXGBE_ERR_PHY_ADDR_INVALID;
}
return 0;
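
Address 0 is a valid MDIO port address, so clearing prtad to 0 on failure made "no PHY" indistinguishable from "PHY at address 0"; MDIO_PRTAD_NONE (-1, from linux/mdio.h) is the unambiguous sentinel, which the x550 code later in this series can then test directly. For example:

#include <stdio.h>
#include <linux/mdio.h>		/* MDIO_PRTAD_NONE == -1 */

int main(void)
{
	int prtad = MDIO_PRTAD_NONE;	/* probe found nothing */

	if (prtad == MDIO_PRTAD_NONE)
		puts("no PHY found");
	else
		printf("PHY at MDIO address %d\n", prtad);
	return 0;
}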
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index e5431bfe3339..a92277683a64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1254,7 +1254,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
return err;
- } else
+ } else if (adapter->ptp_clock)
e_dev_info("registered PHC device on %s\n", netdev->name);
/* set default timestamp mode to disabled here. We do this in
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8618599dfd6f..7e5d9850e4b2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -329,13 +329,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
for (i = 0; i < adapter->num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ ixgbe_sriov_reinit(adapter);
+
err = pci_enable_sriov(dev, num_vfs);
if (err) {
e_dev_warn("Failed to enable PCI sriov: %d\n", err);
return err;
}
ixgbe_get_vfs(adapter);
- ixgbe_sriov_reinit(adapter);
return num_vfs;
#else
@@ -1354,13 +1356,16 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
return err;
}
-int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 vlan_proto)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
if (vlan || qos) {
/* Check if there is already a port VLAN set, if so
* we have to delete the old one first before we
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 47e65e2f886a..0c7977d27b71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -43,7 +43,7 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
- u8 qos);
+ u8 qos, __be16 vlan_proto);
int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index e092a8929413..7e6b9267ca9d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1459,7 +1459,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* Configure internal PHY for KR/KX. */
ixgbe_setup_kr_speed_x550em(hw, speed);
- if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+ if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
return IXGBE_ERR_PHY_ADDR_INVALID;
/* Get external PHY device id */
@@ -2125,7 +2125,7 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @led_idx: led number to turn on
**/
-s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2147,7 +2147,7 @@ s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* @hw: pointer to hardware structure
* @led_idx: led number to turn off
**/
-s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -3049,6 +3049,8 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = {
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_x550a,
.write_reg = &ixgbe_write_phy_reg_x550a,
+ .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
+ .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
};
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4044608083cd..7eaac3234049 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1810,8 +1810,10 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
if (hw->mac.type >= ixgbe_mac_X550_vf)
ixgbevf_setup_vfmrqc(adapter);
+ spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ spin_unlock_bh(&adapter->mbx_lock);
if (ret)
dev_err(&adapter->pdev->dev,
"Failed to set MTU at %d\n", netdev->mtu);
@@ -3758,8 +3760,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
+ spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, max_frame);
+ spin_unlock_bh(&adapter->mbx_lock);
if (ret)
return -EINVAL;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 8982c882af1b..a0d1b084ecec 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -211,8 +211,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!dev->regs) {
dev_err(&pdev->dev, "Unable to remap SMI register\n");
- ret = -ENODEV;
- goto out_mdio;
+ return -ENODEV;
}
init_waitqueue_head(&dev->smi_busy_wait);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8e4252dd9a9d..bfada880e5eb 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -634,8 +634,9 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
}
/* Get System Network Statistics */
-struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static struct rtnl_link_stats64 *
+mvneta_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct mvneta_port *pp = netdev_priv(dev);
unsigned int start;
@@ -3506,8 +3507,9 @@ static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Ethtool methods */
/* Set link ksettings (phy address, speed) for ethtools */
-int mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *cmd)
+static int
+mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct mvneta_port *pp = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 522fe8dda6c3..ad4ab979507b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -52,7 +52,7 @@ static const struct mtk_ethtool_stats {
};
static const char * const mtk_clks_source_name[] = {
- "ethif", "esw", "gp1", "gp2"
+ "ethif", "esw", "gp1", "gp2", "trgpll"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -135,6 +135,33 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+{
+ u32 val;
+ int ret;
+
+ val = (speed == SPEED_1000) ?
+ INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
+ mtk_w32(eth, val, INTF_MODE);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_CLK_SEL362_5,
+ ETHSYS_TRGMII_CLK_SEL362_5);
+
+ val = (speed == SPEED_1000) ? 250000000 : 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+
+ val = (speed == SPEED_1000) ?
+ RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_RCK_CTRL);
+
+ val = (speed == SPEED_1000) ?
+ TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_TCK_CTRL);
+}
+
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
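
mtk_gmac0_rgmii_adjust() retunes the whole TRGMII path on every link-speed change: the interface mode, the 362.5 MHz clock mux, the TRGPLL rate and the RX/TX clock controls. Note the PLL runs at 500 MHz for 10/100 links and 250 MHz for gigabit, as this small sketch of the rate selection shows:

#include <stdio.h>

#define SPEED_100	100
#define SPEED_1000	1000

int main(void)
{
	int speeds[] = { SPEED_1000, SPEED_100 };
	int i;

	for (i = 0; i < 2; i++) {
		/* PLL rate selection mirrored from the hunk above */
		long rate = (speeds[i] == SPEED_1000) ? 250000000L : 500000000L;
		printf("link %d Mb/s -> trgpll %ld Hz\n", speeds[i], rate);
	}
	return 0;
}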
@@ -148,7 +175,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return;
- switch (mac->phy_dev->speed) {
+ switch (dev->phydev->speed) {
case SPEED_1000:
mcr |= MAC_MCR_SPEED_1000;
break;
@@ -157,20 +184,23 @@ static void mtk_phy_link_adjust(struct net_device *dev)
break;
};
- if (mac->phy_dev->link)
+ if (mac->id == 0 && !mac->trgmii)
+ mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
+
+ if (dev->phydev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex) {
+ if (dev->phydev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
+ if (dev->phydev->pause)
rmt_adv = LPA_PAUSE_CAP;
- if (mac->phy_dev->asym_pause)
+ if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ if (dev->phydev->advertising & ADVERTISED_Pause)
lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
lcl_adv |= ADVERTISE_PAUSE_ASYM;
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
@@ -187,7 +217,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
- if (mac->phy_dev->link)
+ if (dev->phydev->link)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
@@ -196,17 +226,9 @@ static void mtk_phy_link_adjust(struct net_device *dev)
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
struct device_node *phy_node)
{
- const __be32 *_addr = NULL;
struct phy_device *phydev;
- int phy_mode, addr;
-
- _addr = of_get_property(phy_node, "reg", NULL);
+ int phy_mode;
- if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
- pr_err("%s: invalid phy address\n", phy_node->name);
- return -EINVAL;
- }
- addr = be32_to_cpu(*_addr);
phy_mode = of_get_phy_mode(phy_node);
if (phy_mode < 0) {
dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
@@ -225,17 +247,17 @@ static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
mac->id, phydev_name(phydev), phydev->phy_id,
phydev->drv->name);
- mac->phy_dev = phydev;
-
return 0;
}
-static int mtk_phy_connect(struct mtk_mac *mac)
+static int mtk_phy_connect(struct net_device *dev)
{
- struct mtk_eth *eth = mac->hw;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth;
struct device_node *np;
u32 val;
+ eth = mac->hw;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
if (!np && of_phy_is_fixed_link(mac->of_node))
if (!of_phy_register_fixed_link(mac->of_node))
@@ -244,6 +266,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ mac->trgmii = true;
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -271,20 +295,23 @@ static int mtk_phy_connect(struct mtk_mac *mac)
val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
- mtk_phy_connect_node(eth, mac, np);
- mac->phy_dev->autoneg = AUTONEG_ENABLE;
- mac->phy_dev->speed = 0;
- mac->phy_dev->duplex = 0;
+ /* couple phydev to net_device */
+ if (mtk_phy_connect_node(eth, mac, np))
+ goto err_phy;
+
+ dev->phydev->autoneg = AUTONEG_ENABLE;
+ dev->phydev->speed = 0;
+ dev->phydev->duplex = 0;
if (of_phy_is_fixed_link(mac->of_node))
- mac->phy_dev->supported |=
+ dev->phydev->supported |=
SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
- mac->phy_dev->advertising = mac->phy_dev->supported |
+ dev->phydev->advertising = dev->phydev->supported |
ADVERTISED_Autoneg;
- phy_start_aneg(mac->phy_dev);
+ phy_start_aneg(dev->phydev);
of_node_put(np);
@@ -292,7 +319,7 @@ static int mtk_phy_connect(struct mtk_mac *mac)
err_phy:
of_node_put(np);
- dev_err(eth->dev, "invalid phy_mode\n");
+ dev_err(eth->dev, "%s: invalid phy\n", __func__);
return -EINVAL;
}
@@ -820,11 +847,51 @@ drop:
return NETDEV_TX_OK;
}
+static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+{
+ int i;
+ struct mtk_rx_ring *ring;
+ int idx;
+
+ if (!eth->hwlro)
+ return &eth->rx_ring[0];
+
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+ }
+ }
+
+ return NULL;
+}
+
+static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
+{
+ struct mtk_rx_ring *ring;
+ int i;
+
+ if (!eth->hwlro) {
+ ring = &eth->rx_ring[0];
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ } else {
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ if (ring->calc_idx_update) {
+ ring->calc_idx_update = false;
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ }
+ }
+ }
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
- int idx = ring->calc_idx;
+ struct mtk_rx_ring *ring;
+ int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma *rxd, trxd;
@@ -836,7 +903,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
dma_addr_t dma_addr;
int mac = 0;
- idx = NEXT_RX_DESP_IDX(idx);
+ ring = mtk_get_rx_ring(eth);
+ if (unlikely(!ring))
+ goto rx_done;
+
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = &ring->dma[idx];
data = ring->data[idx];
@@ -907,12 +978,13 @@ release_desc:
done++;
}
+rx_done:
if (done) {
/* make sure that all changes to the dma ring are flushed before
* we continue
*/
wmb();
- mtk_w32(eth, ring->calc_idx, MTK_PRX_CRX_IDX0);
+ mtk_update_rx_cpu_idx(eth);
}
return done;
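
With hardware LRO the PDMA exposes up to four RX rings, so the NAPI poll loop can no longer hardwire ring 0: every iteration picks whichever ring has a completed descriptor next, and the CPU-index write-back is deferred to only the rings that actually advanced. A self-contained sketch of the ring-selection step; struct ring and get_rx_ring() are stand-ins for struct mtk_rx_ring and mtk_get_rx_ring():

#include <stdbool.h>
#include <stdio.h>

#define NRINGS		4
#define RX_DMA_DONE	1u

struct ring {
	unsigned int next_rxd2;		/* stand-in for dma[next].rxd2 */
	bool calc_idx_update;
};

/* pick the first ring whose next descriptor the DMA engine finished */
static struct ring *get_rx_ring(struct ring *r, bool hwlro)
{
	int i;

	if (!hwlro)
		return &r[0];
	for (i = 0; i < NRINGS; i++) {
		if (r[i].next_rxd2 & RX_DMA_DONE) {
			r[i].calc_idx_update = true;	/* write back later */
			return &r[i];
		}
	}
	return NULL;				/* nothing ready anywhere */
}

int main(void)
{
	struct ring rings[NRINGS] = { [2] = { .next_rxd2 = RX_DMA_DONE } };
	struct ring *hit = get_rx_ring(rings, true);

	printf("ring %ld ready\n", (long)(hit ? hit - rings : -1));
	return 0;
}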
@@ -1135,32 +1207,41 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
}
-static int mtk_rx_alloc(struct mtk_eth *eth)
+static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
+ int rx_data_len, rx_dma_size;
int i;
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
+ if (rx_flag == MTK_RX_FLAGS_HWLRO) {
+ rx_data_len = MTK_MAX_LRO_RX_LENGTH;
+ rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ } else {
+ rx_data_len = ETH_DATA_LEN;
+ rx_dma_size = MTK_DMA_SIZE;
+ }
+
+ ring->frag_size = mtk_max_frag_size(rx_data_len);
ring->buf_size = mtk_max_buf_size(ring->frag_size);
- ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
+ ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
GFP_KERNEL);
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
ring->data[i] = netdev_alloc_frag(ring->frag_size);
if (!ring->data[i])
return -ENOMEM;
}
ring->dma = dma_alloc_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ rx_dma_size * sizeof(*ring->dma),
&ring->phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->dma)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dev,
ring->data[i] + NET_SKB_PAD,
ring->buf_size,
@@ -1171,27 +1252,30 @@ static int mtk_rx_alloc(struct mtk_eth *eth)
ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
}
- ring->calc_idx = MTK_DMA_SIZE - 1;
+ ring->dma_size = rx_dma_size;
+ ring->calc_idx_update = false;
+ ring->calc_idx = rx_dma_size - 1;
+ ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, eth->rx_ring.phys, MTK_PRX_BASE_PTR0);
- mtk_w32(eth, MTK_DMA_SIZE, MTK_PRX_MAX_CNT0);
- mtk_w32(eth, eth->rx_ring.calc_idx, MTK_PRX_CRX_IDX0);
- mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_PDMA_RST_IDX);
+ mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
+ mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
return 0;
}
-static void mtk_rx_clean(struct mtk_eth *eth)
+static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
int i;
if (ring->data && ring->dma) {
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < ring->dma_size; i++) {
if (!ring->data[i])
continue;
if (!ring->dma[i].rxd1)
@@ -1208,13 +1292,275 @@ static void mtk_rx_clean(struct mtk_eth *eth)
if (ring->dma) {
dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma_size * sizeof(*ring->dma),
ring->dma,
ring->phys);
ring->dma = NULL;
}
}
+static int mtk_hwlro_rx_init(struct mtk_eth *eth)
+{
+ int i;
+ u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
+ u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
+
+ /* set LRO rings to auto-learn modes */
+ ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
+
+ /* validate LRO ring */
+ ring_ctrl_dw2 |= MTK_RING_VLD;
+
+ /* set AGE timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
+ ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
+
+ /* set max AGG timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
+
+ /* set max LRO AGG count */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
+ ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
+
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
+ }
+
+ /* IPv4 checksum update enable */
+ lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
+
+ /* switch priority comparison to packet count mode */
+ lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
+
+ /* bandwidth threshold setting */
+ mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
+
+ /* auto-learn score delta setting */
+ mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
+
+ /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
+ mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
+ MTK_PDMA_LRO_ALT_REFRESH_TIMER);
+
+ /* set HW LRO mode & the max aggregation count for rx packets */
+ lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
+
+ /* minimum remaining room of SDL0 in an RXD required for LRO aggregation */
+ lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
+
+ /* enable HW LRO */
+ lro_ctrl_dw0 |= MTK_LRO_EN;
+
+ mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
+ mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
+
+ return 0;
+}
+
+static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
+{
+ int i;
+ u32 val;
+
+ /* relinquish lro rings, flush aggregated packets */
+ mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
+
+ /* wait for the relinquish requests to complete */
+ for (i = 0; i < 10; i++) {
+ val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
+ if (val & MTK_LRO_RING_RELINQUISH_DONE) {
+ msleep(20);
+ continue;
+ }
+ break;
+ }
+
+ /* invalidate lro rings */
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
+
+ /* disable HW LRO */
+ mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
+}
+
+static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
+
+ /* validate the IP setting */
+ mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+}
+
+static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
+}
+
+static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i])
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int mtk_hwlro_add_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if ((fsp->flow_type != TCP_V4_FLOW) ||
+ (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
+ (fsp->location > 1))
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
+
+ return 0;
+}
+
+static int mtk_hwlro_del_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if (fsp->location > 1)
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+
+ return 0;
+}
+
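
Each MAC owns MTK_MAX_LRO_IP_CNT (two) destination-IP slots, and ethtool's fsp->location selects the slot; the global LRO ring index is derived from the MAC id and the slot, as this little calculation illustrates:

#include <stdio.h>

#define MTK_MAX_LRO_IP_CNT 2

int main(void)
{
	int mac, loc;

	for (mac = 0; mac < 2; mac++)
		for (loc = 0; loc < MTK_MAX_LRO_IP_CNT; loc++)
			printf("mac %d, location %d -> hwlro_idx %d\n",
			       mac, loc, mac * MTK_MAX_LRO_IP_CNT + loc);
	return 0;
}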
+static void mtk_hwlro_netdev_disable(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int i, hwlro_idx;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ mac->hwlro_ip[i] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+ }
+
+ mac->hwlro_ip_cnt = 0;
+}
+
+static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ /* only the TCP destination IPv4 address is meaningful; other fields are wildcarded */
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
+ fsp->m_u.tcp_ip4_spec.ip4dst = 0;
+
+ fsp->h_u.tcp_ip4_spec.ip4src = 0;
+ fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+ fsp->h_u.tcp_ip4_spec.psrc = 0;
+ fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+ fsp->h_u.tcp_ip4_spec.pdst = 0;
+ fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+ fsp->h_u.tcp_ip4_spec.tos = 0;
+ fsp->m_u.tcp_ip4_spec.tos = 0xff;
+
+ return 0;
+}
+
+static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i]) {
+ rule_locs[cnt] = i;
+ cnt++;
+ }
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static netdev_features_t mtk_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!(features & NETIF_F_LRO)) {
+ struct mtk_mac *mac = netdev_priv(dev);
+ int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ if (ip_cnt) {
+ netdev_info(dev, "RX flows are still programmed, LRO cannot be disabled\n");
+
+ features |= NETIF_F_LRO;
+ }
+ }
+
+ return features;
+}
+
+static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+{
+ int err = 0;
+
+ if (!((dev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if (!(features & NETIF_F_LRO))
+ mtk_hwlro_netdev_disable(dev);
+
+ return err;
+}
+
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
@@ -1235,6 +1581,7 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
static int mtk_dma_init(struct mtk_eth *eth)
{
int err;
+ u32 i;
if (mtk_dma_busy_wait(eth))
return -EBUSY;
@@ -1250,10 +1597,21 @@ static int mtk_dma_init(struct mtk_eth *eth)
if (err)
return err;
- err = mtk_rx_alloc(eth);
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
return err;
+ if (eth->hwlro) {
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
+ if (err)
+ return err;
+ }
+ err = mtk_hwlro_rx_init(eth);
+ if (err)
+ return err;
+ }
+
/* Enable random early drop and set drop threshold automatically */
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
MTK_QDMA_FC_THRES);
@@ -1278,7 +1636,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
- mtk_rx_clean(eth);
+ mtk_rx_clean(eth, 0);
+
+ if (eth->hwlro) {
+ mtk_hwlro_rx_uninit(eth);
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_rx_clean(eth, i);
+ }
+
kfree(eth->scratch_head);
}
@@ -1373,7 +1738,7 @@ static int mtk_open(struct net_device *dev)
}
atomic_inc(&eth->dma_refcnt);
- phy_start(mac->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
return 0;
@@ -1408,7 +1773,7 @@ static int mtk_stop(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
netif_tx_disable(dev);
- phy_stop(mac->phy_dev);
+ phy_stop(dev->phydev);
/* only shutdown DMA if this is the last user */
if (!atomic_dec_and_test(&eth->dma_refcnt))
@@ -1420,6 +1785,7 @@ static int mtk_stop(struct net_device *dev)
napi_disable(&eth->rx_napi);
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+ mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
mtk_dma_free(eth);
@@ -1548,7 +1914,7 @@ static int __init mtk_init(struct net_device *dev)
dev->addr_assign_type = NET_ADDR_RANDOM;
}
- return mtk_phy_connect(mac);
+ return mtk_phy_connect(dev);
}
static void mtk_uninit(struct net_device *dev)
@@ -1556,23 +1922,18 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- phy_disconnect(mac->phy_dev);
- mtk_mdio_cleanup(eth);
+ phy_disconnect(dev->phydev);
mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
- free_irq(eth->irq[1], dev);
- free_irq(eth->irq[2], dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
-
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
default:
break;
}
@@ -1617,7 +1978,7 @@ static void mtk_pending_work(struct work_struct *work)
if (!eth->mac[i] ||
of_phy_is_fixed_link(eth->mac[i]->of_node))
continue;
- err = phy_init_hw(eth->mac[i]->phy_dev);
+ err = phy_init_hw(eth->netdev[i]->phydev);
if (err)
dev_err(eth->dev, "%s: PHY init failed.\n",
eth->netdev[i]->name);
@@ -1677,35 +2038,26 @@ static int mtk_cleanup(struct mtk_eth *eth)
return 0;
}
-static int mtk_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int mtk_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
+ struct mtk_mac *mac = netdev_priv(ndev);
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- err = phy_read_status(mac->phy_dev);
- if (err)
- return -ENODEV;
-
- return phy_ethtool_gset(mac->phy_dev, cmd);
+ return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}
-static int mtk_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int mtk_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_mac *mac = netdev_priv(ndev);
- if (cmd->phy_address != mac->phy_dev->mdio.addr) {
- mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
- cmd->phy_address);
- if (!mac->phy_dev)
- return -ENODEV;
- }
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
- return phy_ethtool_sset(mac->phy_dev, cmd);
+ return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}
static void mtk_get_drvinfo(struct net_device *dev,
@@ -1739,7 +2091,7 @@ static int mtk_nway_reset(struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- return genphy_restart_aneg(mac->phy_dev);
+ return genphy_restart_aneg(dev->phydev);
}
static u32 mtk_get_link(struct net_device *dev)
@@ -1750,11 +2102,11 @@ static u32 mtk_get_link(struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- err = genphy_update_link(mac->phy_dev);
+ err = genphy_update_link(dev->phydev);
if (err)
return ethtool_op_get_link(dev);
- return mac->phy_dev->link;
+ return dev->phydev->link;
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1800,8 +2152,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
}
}
+ data_src = (u64 *)hwstats;
+
do {
- data_src = (u64 *)hwstats;
data_dst = data;
start = u64_stats_fetch_begin_irq(&hwstats->syncp);
@@ -1810,9 +2163,65 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
+static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (dev->features & NETIF_F_LRO) {
+ cmd->data = MTK_MAX_RX_RING_NUM;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (dev->features & NETIF_F_LRO) {
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ cmd->rule_cnt = mac->hwlro_ip_cnt;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_entry(dev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_all(dev, cmd,
+ rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_add_ipaddr(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_del_ipaddr(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops mtk_ethtool_ops = {
- .get_settings = mtk_get_settings,
- .set_settings = mtk_set_settings,
+ .get_link_ksettings = mtk_get_link_ksettings,
+ .set_link_ksettings = mtk_set_link_ksettings,
.get_drvinfo = mtk_get_drvinfo,
.get_msglevel = mtk_get_msglevel,
.set_msglevel = mtk_set_msglevel,
@@ -1821,6 +2230,8 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
+ .get_rxnfc = mtk_get_rxnfc,
+ .set_rxnfc = mtk_set_rxnfc,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -1835,6 +2246,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
+ .ndo_fix_features = mtk_fix_features,
+ .ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mtk_poll_controller,
#endif
@@ -1873,6 +2286,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw = eth;
mac->of_node = np;
+ memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+ mac->hwlro_ip_cnt = 0;
+
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
GFP_KERNEL);
@@ -1889,6 +2305,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
+
+ eth->netdev[id]->hw_features = MTK_HW_FEATURES;
+ if (eth->hwlro)
+ eth->netdev[id]->hw_features |= NETIF_F_LRO;
+
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
eth->netdev[id]->features |= MTK_HW_FEATURES;
@@ -1941,6 +2362,8 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->pctl);
}
+ eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
+
for (i = 0; i < 3; i++) {
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
@@ -2046,6 +2469,7 @@ static int mtk_remove(struct platform_device *pdev)
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
+ mtk_mdio_cleanup(eth);
return 0;
}
@@ -2054,6 +2478,7 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7623-eth" },
{},
};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 79954b419b53..30031959d6de 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -39,7 +39,21 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
-#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1))
+#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
+#define MTK_MAX_RX_RING_NUM 4
+#define MTK_HW_LRO_DMA_SIZE 8
+
+#define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
+#define MTK_MAX_LRO_IP_CNT 2
+#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
+#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
+#define MTK_HW_LRO_AGG_TIME 10 /* 200us */
+#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
+#define MTK_HW_LRO_MAX_AGG_CNT 64
+#define MTK_HW_LRO_BW_THRE 3000
+#define MTK_HW_LRO_REPLACE_DELTA 1000
+#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
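
NEXT_RX_DESP_IDX() now takes the ring size as a second argument because the LRO rings (MTK_HW_LRO_DMA_SIZE, 8 descriptors) are smaller than the normal ring; the mask form only wraps correctly when the size is a power of two, which both sizes are:

#include <stdio.h>

#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))

int main(void)
{
	int i;

	/* 8-entry LRO ring: index 7 wraps back to 0 */
	for (i = 5; i < 9; i++)
		printf("%d -> %d\n", i & 7, NEXT_RX_DESP_IDX(i & 7, 8));
	return 0;
}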
@@ -50,6 +64,9 @@
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
+/* PDMA HW LRO Alter Flow Timer Register */
+#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
+
/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP 0x20
@@ -70,12 +87,29 @@
/* PDMA RX Base Pointer Register */
#define MTK_PRX_BASE_PTR0 0x900
+#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
/* PDMA RX Maximum Count Register */
#define MTK_PRX_MAX_CNT0 0x904
+#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
/* PDMA RX CPU Pointer Register */
#define MTK_PRX_CRX_IDX0 0x908
+#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+
+/* PDMA HW LRO Control Registers */
+#define MTK_PDMA_LRO_CTRL_DW0 0x980
+#define MTK_LRO_EN BIT(0)
+#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
+#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+
+#define MTK_PDMA_LRO_CTRL_DW1 0x984
+#define MTK_PDMA_LRO_CTRL_DW2 0x988
+#define MTK_PDMA_LRO_CTRL_DW3 0x98c
+#define MTK_ADMA_MODE BIT(15)
+#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
/* PDMA Global Configuration Register */
#define MTK_PDMA_GLO_CFG 0xa04
@@ -84,6 +118,7 @@
/* PDMA Reset Index Register */
#define MTK_PDMA_RST_IDX 0xa08
#define MTK_PST_DRX_IDX0 BIT(16)
+#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_INT 0xa0c
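
The per-ring register layout is regular: ring n's base-pointer, max-count and CPU-index registers sit 0x10 * n above the ring-0 addresses, and the PDMA reset bits are one bit per ring starting at BIT(16). The _CFG() macros encode exactly that stride, e.g.:

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define MTK_PRX_CRX_IDX0	0x908
#define MTK_PRX_CRX_IDX_CFG(x)	(MTK_PRX_CRX_IDX0 + ((x) * 0x10))
#define MTK_PST_DRX_IDX0	BIT(16)
#define MTK_PST_DRX_IDX_CFG(x)	(MTK_PST_DRX_IDX0 << (x))

int main(void)
{
	int ring;

	for (ring = 0; ring < 4; ring++)
		printf("ring %d: crx idx reg 0x%x, rst bit 0x%x\n",
		       ring, MTK_PRX_CRX_IDX_CFG(ring),
		       MTK_PST_DRX_IDX_CFG(ring));
	return 0;
}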
@@ -94,10 +129,33 @@
/* PDMA Interrupt Mask Register */
#define MTK_PDMA_INT_MASK 0xa28
+/* PDMA HW LRO Alter Flow Delta Register */
+#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
+
/* PDMA Interrupt grouping registers */
#define MTK_PDMA_INT_GRP1 0xa50
#define MTK_PDMA_INT_GRP2 0xa54
+/* PDMA HW LRO IP Setting Registers */
+#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
+#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
+#define MTK_RING_MYIP_VLD BIT(9)
+
+/* PDMA HW LRO Ring Control Registers */
+#define MTK_LRO_RX_RING0_CTRL_DW1 0xb28
+#define MTK_LRO_RX_RING0_CTRL_DW2 0xb2c
+#define MTK_LRO_RX_RING0_CTRL_DW3 0xb30
+#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
+#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
+#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
+#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
+#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
+#define MTK_RING_AUTO_LERAN_MODE (3 << 6)
+#define MTK_RING_VLD BIT(8)
+#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
+#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
+#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
+
/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
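
The 16-bit LRO age timer does not fit in one register field, so it is split: the low 10 bits land in bits 31:22 of CTRL_DW1 and the high 6 bits in bits 5:0 of CTRL_DW2. This shows the values the two macros actually produce:

#include <stdio.h>

#define MTK_HW_LRO_AGE_TIME	50
#define MTK_RING_AGE_TIME_L	((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
#define MTK_RING_AGE_TIME_H	((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)

int main(void)
{
	printf("ring_ctrl_dw1 |= 0x%08x (low 10 bits at 31:22)\n",
	       MTK_RING_AGE_TIME_L);
	printf("ring_ctrl_dw2 |= 0x%08x (high 6 bits at 5:0)\n",
	       MTK_RING_AGE_TIME_H);
	return 0;
}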
@@ -132,7 +190,6 @@
/* QDMA Reset Index Register */
#define MTK_QDMA_RST_IDX 0x1A08
-#define MTK_PST_DRX_IDX0 BIT(16)
/* QDMA Delay Interrupt Register */
#define MTK_QDMA_DELAY_INT 0x1A0C
@@ -256,6 +313,30 @@
MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
+/* TRGMII RXC control register */
+#define TRGMII_RCK_CTRL 0x10300
+#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
+#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
+#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_DQSISEL BIT(30)
+#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+
+/* TRGMII TXC control register */
+#define TRGMII_TCK_CTRL 0x10340
+#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define TXC_INV BIT(30)
+#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
+#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+
+/* TRGMII Interface mode register */
+#define INTF_MODE 0x10390
+#define TRGMII_INTF_DIS BIT(0)
+#define TRGMII_MODE BIT(1)
+#define TRGMII_CENTRAL_ALIGNED BIT(2)
+#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
+#define INTF_MODE_RGMII_10_100 0
+
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -266,7 +347,11 @@
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
-/*ethernet reset control register*/
+/* ethernet subsystem clock register */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+
+/* ethernet reset control register */
#define ETHSYS_RSTCTRL 0x34
#define RSTCTRL_FE BIT(6)
#define RSTCTRL_PPE BIT(31)
@@ -332,6 +417,7 @@ enum mtk_clks_map {
MTK_CLK_ESW,
MTK_CLK_GP1,
MTK_CLK_GP2,
+ MTK_CLK_TRGPLL,
MTK_CLK_MAX
};
@@ -377,6 +463,12 @@ struct mtk_tx_ring {
atomic_t free_count;
};
+/* PDMA rx ring mode */
+enum mtk_rx_flags {
+ MTK_RX_FLAGS_NORMAL = 0,
+ MTK_RX_FLAGS_HWLRO,
+};
+
/* struct mtk_rx_ring - This struct holds info describing a RX ring
* @dma: The descriptor ring
* @data: The memory pointed at by the ring
@@ -391,7 +483,10 @@ struct mtk_rx_ring {
dma_addr_t phys;
u16 frag_size;
u16 buf_size;
+ u16 dma_size;
+ bool calc_idx_update;
u16 calc_idx;
+ u32 crx_idx_reg;
};
/* currently no SoC has more than 2 macs */
@@ -439,9 +534,10 @@ struct mtk_eth {
unsigned long sysclk;
struct regmap *ethsys;
struct regmap *pctl;
+ bool hwlro;
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
- struct mtk_rx_ring rx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
@@ -461,7 +557,8 @@ struct mtk_eth {
* @of_node: Our devicetree node
* @hw: Backpointer to our main datastruture
* @hw_stats: Packet statistics counter
- * @phy_dev: The attached PHY if available
+ * @trgmii: Indicates whether the MAC uses TRGMII connected to the
+ * internal switch
*/
struct mtk_mac {
int id;
@@ -469,7 +566,9 @@ struct mtk_mac {
struct device_node *of_node;
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
- struct phy_device *phy_dev;
+ __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
+ int hwlro_ip_cnt;
+ bool trgmii;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f04a423ff79d..b1cef7a0f7ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -785,17 +785,23 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ int ret;
+
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
return mlx4_internal_err_ret_value(dev, op,
op_modifier);
+ down_read(&mlx4_priv(dev)->cmd.switch_sem);
if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param,
- out_is_imm, in_modifier,
- op_modifier, op, timeout);
+ ret = mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
else
- return mlx4_cmd_poll(dev, in_param, out_param,
- out_is_imm, in_modifier,
- op_modifier, op, timeout);
+ ret = mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+
+ up_read(&mlx4_priv(dev)->cmd.switch_sem);
+ return ret;
}
return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
in_modifier, op_modifier, op, timeout);
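
switch_sem closes a race between issuing a command and flipping between polling and event-driven completion: command issuers take the semaphore shared, the mode switch takes it exclusive, so a command always runs wholly in one mode. A userspace analogy with a pthread rwlock; run_cmd() and set_mode() are illustrative stand-ins for __mlx4_cmd() and mlx4_cmd_use_events()/mlx4_cmd_use_polling():

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t switch_sem = PTHREAD_RWLOCK_INITIALIZER;
static int use_events;

static const char *run_cmd(void)
{
	const char *mode;

	pthread_rwlock_rdlock(&switch_sem);	/* shared: many commands at once */
	mode = use_events ? "event" : "poll";	/* mode is stable in here */
	pthread_rwlock_unlock(&switch_sem);
	return mode;
}

static void set_mode(int events)
{
	pthread_rwlock_wrlock(&switch_sem);	/* exclusive: no command inside */
	use_events = events;
	pthread_rwlock_unlock(&switch_sem);
}

int main(void)
{
	printf("cmd ran in %s mode\n", run_cmd());
	set_mode(1);
	printf("cmd ran in %s mode\n", run_cmd());
	return 0;
}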
@@ -1845,6 +1851,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
vp_oper->state.default_qos == vp_admin->default_qos &&
+ vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
vp_oper->state.link_state == vp_admin->link_state &&
vp_oper->state.qos_vport == vp_admin->qos_vport)
return 0;
@@ -1903,6 +1910,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
vp_oper->state.link_state = vp_admin->link_state;
vp_oper->state.qos_vport = vp_admin->qos_vport;
@@ -1916,6 +1924,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
work->qos_vport = vp_oper->state.qos_vport;
work->vlan_id = vp_oper->state.default_vlan;
work->vlan_ix = vp_oper->vlan_idx;
+ work->vlan_proto = vp_oper->state.vlan_proto;
work->priv = priv;
INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
queue_work(priv->mfunc.master.comm_wq, &work->work);
@@ -1986,6 +1995,8 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
int port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_slave_state *slave_state =
+ &priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
int min_port = find_first_bit(actv_ports.ports,
@@ -2000,12 +2011,26 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
- vp_oper->state = *vp_admin;
+ if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
+ slave_state->vst_qinq_supported) {
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
+ vp_oper->state.default_vlan = vp_admin->default_vlan;
+ vp_oper->state.default_qos = vp_admin->default_qos;
+ }
+ vp_oper->state.link_state = vp_admin->link_state;
+ vp_oper->state.mac = vp_admin->mac;
+ vp_oper->state.spoofchk = vp_admin->spoofchk;
+ vp_oper->state.tx_rate = vp_admin->tx_rate;
+ vp_oper->state.qos_vport = vp_admin->qos_vport;
+ vp_oper->state.guid = vp_admin->guid;
+
if (MLX4_VGT != vp_admin->default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
+ vp_oper->state.default_vlan = MLX4_VGT;
+ vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
@@ -2086,6 +2111,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
slave_state[slave].old_vlan_api = false;
+ slave_state[slave].vst_qinq_supported = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
@@ -2353,6 +2379,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
vf_oper = &priv->mfunc.master.vf_oper[i];
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ s_state->vst_qinq_supported = false;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
@@ -2382,6 +2409,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
admin_vport->qos_vport =
MLX4_VPP_DEFAULT_VPORT;
oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
+ admin_vport->vlan_proto = htons(ETH_P_8021Q);
+ oper_vport->vlan_proto = htons(ETH_P_8021Q);
vf_oper->vport[port].vlan_idx = NO_INDX;
vf_oper->vport[port].mac_idx = NO_INDX;
mlx4_set_random_admin_guid(dev, i, port);
@@ -2454,6 +2483,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
int flags = 0;
if (!priv->cmd.initialized) {
+ init_rwsem(&priv->cmd.switch_sem);
mutex_init(&priv->cmd.slave_cmd_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
@@ -2583,6 +2613,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
if (!priv->cmd.context)
return -ENOMEM;
+ down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
@@ -2606,6 +2637,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
+ up_write(&priv->cmd.switch_sem);
return err;
}
@@ -2618,6 +2650,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
for (i = 0; i < priv->cmd.max_cmds; ++i)
@@ -2626,6 +2659,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
kfree(priv->cmd.context);
up(&priv->cmd.poll_sem);
+ up_write(&priv->cmd.switch_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
@@ -2937,10 +2971,13 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
+ __be16 proto)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *vf_admin;
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_vport_oper_state *vf_oper;
int slave;
if ((!mlx4_is_master(dev)) ||
@@ -2950,12 +2987,31 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
if ((vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (proto == htons(ETH_P_8021AD) &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
+ return -EPROTONOSUPPORT;
+
+ if (proto != htons(ETH_P_8021Q) &&
+ proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ if ((proto == htons(ETH_P_8021AD)) &&
+ ((vlan == 0) || (vlan == MLX4_VGT)))
+ return -EINVAL;
+
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
+ slave_state = &priv->mfunc.master.slave_state[slave];
+ if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
+ (!slave_state->vst_qinq_supported)) {
+ mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
+ return -EPROTONOSUPPORT;
+ }
port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
return -EPERM;
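
Supporting VST QinQ means the S-VLAN protocol (802.1ad) is only accepted when the device advertises SVLAN_BY_QP and the slave has negotiated support, and an 802.1ad port VLAN must carry a real VLAN id. A standalone sketch of those checks; check_vst_proto() is hypothetical, MLX4_VGT is assumed here to be the 4095 "VLAN guest tagging" sentinel, and plain error numbers stand in for -EINVAL/-EPROTONOSUPPORT:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>	/* ETH_P_8021Q, ETH_P_8021AD */

#define MLX4_VGT 4095		/* assumed "VLAN guest tagging" sentinel */

static int check_vst_proto(uint16_t proto_be, unsigned int vlan,
			   int svlan_by_qp)
{
	if (proto_be == htons(ETH_P_8021AD) && !svlan_by_qp)
		return -93;	/* -EPROTONOSUPPORT */
	if (proto_be != htons(ETH_P_8021Q) && proto_be != htons(ETH_P_8021AD))
		return -22;	/* -EINVAL */
	if (proto_be == htons(ETH_P_8021AD) && (vlan == 0 || vlan == MLX4_VGT))
		return -22;	/* QinQ needs a real S-VLAN id */
	return 0;
}

int main(void)
{
	printf("%d\n", check_vst_proto(htons(ETH_P_8021AD), 100, 1)); /* 0 */
	printf("%d\n", check_vst_proto(htons(ETH_P_8021AD), 0, 1));   /* -22 */
	printf("%d\n", check_vst_proto(htons(ETH_P_8021AD), 100, 0)); /* -93 */
	return 0;
}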
@@ -2965,6 +3021,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
else
vf_admin->default_vlan = vlan;
vf_admin->default_qos = qos;
+ vf_admin->vlan_proto = proto;
/* If rate was configured prior to VST, we saved the configured rate
* in vf_admin->rate and now, if priority supported we enforce the QoS
@@ -2973,7 +3030,12 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
vf_admin->tx_rate)
vf_admin->qos_vport = slave;
- if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
+ /* Try to activate the new VF state without a restart;
+ * this is not supported when moving to VST QinQ mode.
+ */
+ if ((proto == htons(ETH_P_8021AD) &&
+ vf_oper->state.vlan_proto != proto) ||
+ mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_info(dev,
"updating vf %d port %d config will take effect on next VF restart\n",
vf, port);
@@ -3117,6 +3179,7 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
ivf->vlan = s_info->default_vlan;
ivf->qos = s_info->default_qos;
+ ivf->vlan_proto = s_info->vlan_proto;
if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
ivf->max_tx_rate = s_info->tx_rate;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 1494997c4f7e..08fc5fc56d43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -298,7 +298,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
if (IS_ERR(mdev->ptp_clock)) {
mdev->ptp_clock = NULL;
mlx4_err(mdev, "ptp_clock_register failed\n");
- } else {
+ } else if (mdev->ptp_clock) {
mlx4_info(mdev, "registered PHC clock\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 62516f8369ba..7e703bed7b82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2400,12 +2400,14 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}
-static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
- return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
+ return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
+ vlan_proto);
}
static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
@@ -3224,6 +3226,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mlx4_is_slave(mdev->dev)) {
+ bool vlan_offload_disabled;
int phv;
err = get_phv_bit(mdev->dev, port, &phv);
@@ -3231,6 +3234,18 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
}
+ err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
+ &vlan_offload_disabled);
+ if (!err && vlan_offload_disabled) {
+ dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ }
} else {
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
!(mdev->dev->caps.flags2 &
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 6758292311f4..f2e8beddcf44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -72,7 +72,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
}
dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
frag_info->dma_dir);
- if (dma_mapping_error(priv->ddev, dma)) {
+ if (unlikely(dma_mapping_error(priv->ddev, dma))) {
put_page(page);
return -ENOMEM;
}
@@ -108,7 +108,8 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
ring_alloc[i].page_size)
continue;
- if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
+ if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
+ frag_info, gfp)))
goto out;
}
@@ -585,7 +586,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
- if (!frags[nr].page)
+ if (unlikely(!frags[nr].page))
goto fail;
dma = be64_to_cpu(rx_desc->data[nr].addr);
@@ -625,7 +626,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
dma_addr_t dma;
skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
- if (!skb) {
+ if (unlikely(!skb)) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
@@ -736,7 +737,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
{
__wsum csum_pseudo_hdr = 0;
- if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
+ ipv6h->nexthdr == IPPROTO_HOPOPTS))
return -1;
hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
@@ -769,7 +771,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
- if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
+ if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
return -1;
#endif
return 0;
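
The unlikely()/likely() annotations added throughout en_rx.c compile down to __builtin_expect() and only influence how the compiler lays out basic blocks for the predicted-hot path; semantics are unchanged. A standalone sketch of the mechanism (assuming GCC or Clang):

	#include <stdio.h>

	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)

	static int process(const void *pkt)
	{
		if (unlikely(!pkt))	/* error path laid out out-of-line */
			return -1;
		return 0;		/* fall-through stays on the hot path */
	}

	int main(void)
	{
		printf("%d\n", process("pkt"));
		return 0;
	}
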
@@ -796,10 +798,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
u64 timestamp;
bool l2_tunnel;
- if (!priv->port_up)
+ if (unlikely(!priv->port_up))
return 0;
- if (budget <= 0)
+ if (unlikely(budget <= 0))
return polled;
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
@@ -902,16 +904,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_PASS:
break;
case XDP_TX:
- if (!mlx4_en_xmit_frame(frags, dev,
+ if (likely(!mlx4_en_xmit_frame(frags, dev,
length, tx_index,
- &doorbell_pending))
+ &doorbell_pending)))
goto consumed;
- break;
+ goto xdp_drop; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
case XDP_DROP:
- if (mlx4_en_rx_recycle(ring, frags))
+xdp_drop:
+ if (likely(mlx4_en_rx_recycle(ring, frags)))
goto consumed;
goto next;
}
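
The xdp_drop label above closes a leak: previously a failed mlx4_en_xmit_frame() broke out of the switch, so the frame's pages were neither recycled nor counted as dropped. Condensed control flow after the change (helper names shortened for illustration):

	switch (act) {
	case XDP_PASS:
		break;			/* hand the frame to the stack   */
	case XDP_TX:
		if (likely(!xmit_frame(frags)))
			goto consumed;	/* TX ring now owns the pages    */
		goto xdp_drop;		/* xmit failed: drop, don't leak */
	default:			/* unknown verdict               */
	case XDP_ABORTED:
	case XDP_DROP:
	xdp_drop:
		if (likely(recycle(frags)))
			goto consumed;	/* pages back into the RX ring   */
		goto next;		/* free the pages and move on    */
	}
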
@@ -1015,12 +1018,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
- if (!skb) {
+ if (unlikely(!skb)) {
ring->dropped++;
goto next;
}
- if (unlikely(priv->validate_loopback)) {
+ if (unlikely(priv->validate_loopback)) {
validate_loopback(priv, skb);
goto next;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index f613977455e0..cf8f8a72a801 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
return 0;
err_out_unmap:
- while (i >= 0)
- mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+ while (i > 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_priv(dev)->port[i].rmap) {
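
The eq.c change fixes an off-by-one in the error unwind: on entry to err_out_unmap, i indexes the EQ whose creation just failed, so that entry was never initialized and must not be freed. The old `while (i >= 0) ... eq[i--]` freed it anyway (one element too many); the new form frees exactly eq[0..i-1]. A standalone demonstration:

	#include <stdio.h>

	int main(void)
	{
		int i = 2;	/* eq[0], eq[1] created; eq[2] failed */

		/* buggy:  while (i >= 0) free_eq(&eq[i--]);  touches eq[2] */
		while (i > 0)
			printf("freeing eq[%d]\n", --i);  /* eq[1], eq[0] only */
		return 0;
	}
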
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d728704d0c7b..f9cbc67f1694 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -158,7 +158,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[31] = "Modifying loopback source checks using UPDATE_QP support",
[32] = "Loopback source checks support",
[33] = "RoCEv2 support",
- [34] = "DMFS Sniffer support (UC & MC)"
+ [34] = "DMFS Sniffer support (UC & MC)",
+ [35] = "QinQ VST mode support",
};
int i;
@@ -248,6 +249,72 @@ out:
return err;
}
+static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_vport_state *vp_admin;
+ int err;
+
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+
+ if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
+ err = __mlx4_register_vlan(&priv->dev, port,
+ vp_admin->default_vlan,
+ &vp_oper->vlan_idx);
+ if (err) {
+ vp_oper->vlan_idx = NO_INDX;
+ mlx4_warn(&priv->dev,
+ "No vlan resources slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
+ (int)(vp_oper->state.default_vlan),
+ vp_oper->vlan_idx, slave, port);
+ }
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
+ vp_oper->state.default_vlan = vp_admin->default_vlan;
+ vp_oper->state.default_qos = vp_admin->default_qos;
+
+ return 0;
+}
+
+static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_vport_state *vp_admin;
+ int err;
+
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ slave_state = &priv->mfunc.master.slave_state[slave];
+
+ if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
+ (!slave_state->active))
+ return 0;
+
+ if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
+ vp_oper->state.default_vlan == vp_admin->default_vlan &&
+ vp_oper->state.default_qos == vp_admin->default_qos)
+ return 0;
+
+ if (!slave_state->vst_qinq_supported) {
+ /* Warn and revert the request to set vst QinQ mode */
+ vp_admin->vlan_proto = vp_oper->state.vlan_proto;
+ vp_admin->default_vlan = vp_oper->state.default_vlan;
+ vp_admin->default_qos = vp_oper->state.default_qos;
+
+ mlx4_warn(&priv->dev,
+ "Slave %d does not support VST QinQ mode\n", slave);
+ return 0;
+ }
+
+ err = mlx4_activate_vst_qinq(priv, slave, port);
+ return err;
+}
+
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -311,14 +378,18 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
-#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
#define QUERY_FUNC_CAP_PHV_BIT 0x40
+#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE 0x20
+
+#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ BIT(30)
+#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31)
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
int converted_port = mlx4_slave_convert_port(
dev, slave, vhcr->in_modifier);
+ struct mlx4_vport_oper_state *vp_oper;
if (converted_port < 0)
return -EINVAL;
@@ -357,15 +428,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
- if (dev->caps.phv_bit[port]) {
- field = QUERY_FUNC_CAP_PHV_BIT;
- MLX4_PUT(outbox->buf, field,
- QUERY_FUNC_CAP_FLAGS0_OFFSET);
- }
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ err = mlx4_handle_vst_qinq(priv, slave, port);
+ if (err)
+ return err;
+
+ field = 0;
+ if (dev->caps.phv_bit[port])
+ field |= QUERY_FUNC_CAP_PHV_BIT;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
+ struct mlx4_slave_state *slave_state =
+ &priv->mfunc.master.slave_state[slave];
+
/* enable rdma and ethernet interfaces, new quota locations,
* and reserved lkey
*/
@@ -439,6 +519,10 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
+
+ if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
+ slave_state->vst_qinq_supported = true;
+
} else
err = -EINVAL;
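
Swapping (1 << 31) for BIT(31) above is more than style: shifting a 1 of type int into the sign bit is undefined behaviour in C, whereas BIT(n) expands to (1UL << (n)) in the kernel headers and stays well-defined. A userspace sketch:

	#include <stdio.h>

	#define BIT(nr) (1UL << (nr))	/* as in <linux/bitops.h> */

	int main(void)
	{
		/* int bad = 1 << 31;    undefined: signed overflow */
		unsigned long ok = BIT(31);

		printf("BIT(31) = 0x%lx\n", ok);	/* 0x80000000 */
		return 0;
	}
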
@@ -454,10 +538,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
u32 size, qkey;
int err = 0, quotas = 0;
u32 in_modifier;
+ u32 slave_caps;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
- in_modifier = op_modifier ? gen_or_port :
+ slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
+ in_modifier = op_modifier ? gen_or_port : slave_caps;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -612,8 +698,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
- func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+ MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
@@ -690,6 +775,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
+#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
@@ -767,12 +853,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
dev_cap->reserved_mtts = 1 << (field >> 4);
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
- dev_cap->max_mrw_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
dev_cap->reserved_mrws = 1 << (field & 0xf);
- MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
- dev_cap->max_mtt_seg = 1 << (field & 0x3f);
MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
dev_cap->num_sys_eqs = size & 0xfff;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
@@ -857,6 +939,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
dev_cap->max_sq_desc_sz = size;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
+ if (field & 0x1)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
dev_cap->max_qp_per_mcg = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
@@ -2914,7 +2999,7 @@ int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
if (!err)
- *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
return err;
}
EXPORT_SYMBOL(get_phv_bit);
@@ -2938,6 +3023,22 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
}
EXPORT_SYMBOL(set_phv_bit);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled)
+{
+ struct mlx4_func_cap func_cap;
+ int err;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *vlan_offload_disabled =
+ !!(func_cap.flags0 &
+ QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled);
+
void mlx4_replace_zero_macs(struct mlx4_dev *dev)
{
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index cdbd76f10ced..5343a0599253 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -80,9 +80,7 @@ struct mlx4_dev_cap {
int max_eqs;
int num_sys_eqs;
int reserved_mtts;
- int max_mrw_sz;
int reserved_mrws;
- int max_mtt_seg;
int max_requester_per_qp;
int max_responder_per_qp;
int max_rdma_global;
@@ -152,7 +150,7 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u32 reserved_lkey;
u8 physical_port;
- u8 port_flags;
+ u8 flags0;
u8 flags1;
u64 phys_port_id;
u32 extra_flags;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 75dd2e3d3059..7183ac4135d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2970,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
+ devlink_port_unregister(&info->devlink_port);
info->port = -1;
}
@@ -2984,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+ devlink_port_unregister(&info->devlink_port);
+
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
info->rmap = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index c9d7fc5159f2..e4878f31e45d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,6 +46,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <net/devlink.h>
+#include <linux/rwsem.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -482,6 +483,7 @@ struct mlx4_slave_state {
u8 init_port_mask;
bool active;
bool old_vlan_api;
+ bool vst_qinq_supported;
u8 function;
dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1];
@@ -507,6 +509,7 @@ struct mlx4_vport_state {
u64 mac;
u16 default_vlan;
u8 default_qos;
+ __be16 vlan_proto;
u32 tx_rate;
bool spoofchk;
u32 link_state;
@@ -627,6 +630,7 @@ struct mlx4_cmd {
struct mutex slave_cmd_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct rw_semaphore switch_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -655,6 +659,7 @@ struct mlx4_vf_immed_vlan_work {
u8 qos_vport;
u16 vlan_id;
u16 orig_vlan_id;
+ __be16 vlan_proto;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 8b81114bdc72..84d7857ccc27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -790,10 +790,22 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
- qpc->pri_path.vlan_control |=
- MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
+ /* vst QinQ should block untagged on TX, but the
+ * cvlan is in the payload and phv is set, so the
+ * hw sees it as untagged. Block tagged instead.
+ */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ } else { /* vst 802.1Q */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ }
} else { /* priority tagged */
qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
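
The vlan_proto comparisons in this file work on __be16 values: the TPID is converted to network byte order once at assignment, so every later comparison against htons(ETH_P_8021AD) is a plain 16-bit compare with no per-packet conversion. For reference, 0x8100 is the 802.1Q customer-tag TPID and 0x88A8 the 802.1ad service-tag TPID used for QinQ. A small standalone illustration:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ETH_P_8021Q  0x8100	/* C-VLAN TPID (inner tag)       */
	#define ETH_P_8021AD 0x88A8	/* S-VLAN TPID (outer tag, QinQ) */

	int main(void)
	{
		uint16_t vlan_proto = htons(ETH_P_8021AD); /* like __be16 */

		if (vlan_proto == htons(ETH_P_8021AD))
			puts("VST QinQ: hw inserts the service (outer) tag");
		else
			puts("plain VST: hw inserts an 802.1Q customer tag");
		return 0;
	}
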
@@ -802,7 +814,11 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
- qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ qpc->pri_path.fl |= MLX4_FL_SV;
+ else
+ qpc->pri_path.fl |= MLX4_FL_CV;
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
@@ -5238,6 +5254,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
(1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
@@ -5266,7 +5283,12 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
else if (!work->vlan_id)
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
- else
+ else if (work->vlan_proto == htons(ETH_P_8021AD))
+ vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ else /* vst 802.1Q */
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
@@ -5311,7 +5333,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
upd_context->qp_context.pri_path.fvl_rx =
qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.fl =
- qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (work->vlan_proto == htons(ETH_P_8021AD))
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
+ else
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
upd_context->qp_context.pri_path.feup =
qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.sched_queue =
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 67146624eb58..f44d089e2ca6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -45,15 +45,12 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
- spin_lock(&srq_table->lock);
-
+ rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
+ rcu_read_unlock();
if (srq)
atomic_inc(&srq->refcount);
-
- spin_unlock(&srq_table->lock);
-
- if (!srq) {
+ else {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return;
}
@@ -301,12 +298,11 @@ struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
- unsigned long flags;
- spin_lock_irqsave(&srq_table->lock, flags);
+ rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (dev->caps.num_srqs - 1));
- spin_unlock_irqrestore(&srq_table->lock, flags);
+ rcu_read_unlock();
return srq;
}
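
The srq.c conversion replaces a spinlock around radix_tree_lookup() with an RCU read-side section; tree updates remain serialized elsewhere, and the SRQ object itself is kept alive by its refcount/completion teardown, which waits for the count to drop before freeing. The canonical form of this lookup pattern, sketched under those assumptions:

	rcu_read_lock();
	obj = radix_tree_lookup(&table->tree, key);
	if (obj && !atomic_inc_not_zero(&obj->refcount))
		obj = NULL;		/* raced with teardown */
	rcu_read_unlock();

The mlx4 code can afford to take its reference after rcu_read_unlock() only because SRQ removal waits on the refcount completion before the object is freed.
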
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 7dd4763e726e..460363b66cb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -65,6 +65,8 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+#define MLX5_RX_HEADROOM NET_SKB_PAD
+
#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_WQE_SZ 18
@@ -99,6 +101,18 @@
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
+#define MLX5E_ICOSQ_MAX_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_IHS_DS_COUNT \
+ DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
+#define MLX5E_XDP_TX_DS_COUNT \
+ (MLX5E_XDP_IHS_DS_COUNT + \
+ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_WQEBBS \
+ DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
+
#define MLX5E_NUM_MAIN_GROUPS 9
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
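
The XDP WQE sizing macros above resolve to small constants. With the usual mlx5 segment sizes (16-byte data segments, 64-byte building blocks, and 2 inline bytes carried inside the eth segment itself; all assumed here, not restated by the patch), the entire XDP TX WQE fits in a single building block. Worked out standalone:

	#include <stdio.h>

	#define MLX5_SEND_WQE_DS	16	/* bytes per data segment (assumed) */
	#define MLX5_SEND_WQEBB_NUM_DS	4	/* 64-byte BB / 16-byte DS          */
	#define ETH_HLEN		14
	#define VLAN_HLEN		4
	#define TX_WQE_SZ		32	/* ctrl seg + eth seg, 16 B each    */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		int min_inline = ETH_HLEN + VLAN_HLEN;		/* 18 */
		int ihs_ds = DIV_ROUND_UP(min_inline - 2, MLX5_SEND_WQE_DS);
		int tx_ds  = ihs_ds + TX_WQE_SZ / MLX5_SEND_WQE_DS + 1;
		int wqebbs = DIV_ROUND_UP(tx_ds, MLX5_SEND_WQEBB_NUM_DS);

		printf("ihs_ds=%d tx_ds=%d wqebbs=%d\n", ihs_ds, tx_ds, wqebbs);
		return 0;	/* prints ihs_ds=1 tx_ds=4 wqebbs=1 */
	}
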
@@ -302,10 +316,20 @@ struct mlx5e_page_cache {
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
- u32 wqe_sz;
- struct sk_buff **skb;
- struct mlx5e_mpw_info *wqe_info;
- void *mtt_no_align;
+
+ union {
+ struct mlx5e_dma_info *dma_info;
+ struct {
+ struct mlx5e_mpw_info *info;
+ void *mtt_no_align;
+ u32 mtt_offset;
+ } mpwqe;
+ };
+ struct {
+ u8 page_order;
+ u32 wqe_sz; /* wqe data buffer size */
+ u8 map_dir; /* dma map direction */
+ } buff;
__be32 mkey_be;
struct device *pdev;
@@ -321,9 +345,9 @@ struct mlx5e_rq {
unsigned long state;
int ix;
- u32 mpwqe_mtt_offset;
struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct bpf_prog *xdp_prog;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -370,11 +394,17 @@ enum {
MLX5E_SQ_STATE_BF_ENABLE,
};
-struct mlx5e_ico_wqe_info {
+struct mlx5e_sq_wqe_info {
u8 opcode;
u8 num_wqebbs;
};
+enum mlx5e_sq_type {
+ MLX5E_SQ_TXQ,
+ MLX5E_SQ_ICO,
+ MLX5E_SQ_XDP
+};
+
struct mlx5e_sq {
/* data path */
@@ -392,10 +422,20 @@ struct mlx5e_sq {
struct mlx5e_cq cq;
- /* pointers to per packet info: write@xmit, read@completion */
- struct sk_buff **skb;
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
+ /* pointers to per tx element info: write@xmit, read@completion */
+ union {
+ struct {
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } txq;
+ struct mlx5e_sq_wqe_info *ico_wqe;
+ struct {
+ struct mlx5e_sq_wqe_info *wqe_info;
+ struct mlx5e_dma_info *di;
+ bool doorbell;
+ } xdp;
+ } db;
/* read only */
struct mlx5_wq_cyc wq;
@@ -417,8 +457,8 @@ struct mlx5e_sq {
struct mlx5_uar uar;
struct mlx5e_channel *channel;
int tc;
- struct mlx5e_ico_wqe_info *ico_wqe_info;
u32 rate_limit;
+ u8 type;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -434,8 +474,10 @@ enum channel_flags {
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
+ struct mlx5e_sq xdp_sq;
struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_sq icosq; /* internal control operations */
+ bool xdp;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -617,6 +659,7 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_sq **txq_to_sq_map;
int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+ struct bpf_prog *xdp_prog;
/* priv data path fields - end */
unsigned long state;
@@ -663,7 +706,7 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
@@ -764,7 +807,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
- return rq->mpwqe_mtt_offset +
+ return rq->mpwqe.mtt_offset +
wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
@@ -826,6 +869,7 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 847a8f3ac2b2..13dc388667b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -273,7 +273,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
- if (IS_ERR_OR_NULL(tstamp->ptp)) {
+ if (IS_ERR(tstamp->ptp)) {
mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
PTR_ERR(tstamp->ptp));
tstamp->ptp = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8595b507e200..7eaf38020a8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
+#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -50,7 +51,7 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
u16 max_inline;
u8 min_inline_mode;
- bool icosq;
+ enum mlx5e_sq_type type;
};
struct mlx5e_cq_param {
@@ -63,12 +64,55 @@ struct mlx5e_cq_param {
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
+ struct mlx5e_sq_param xdp_sq;
struct mlx5e_sq_param icosq;
struct mlx5e_cq_param rx_cq;
struct mlx5e_cq_param tx_cq;
struct mlx5e_cq_param icosq_cq;
};
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, striding_rq) &&
+ MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+{
+ priv->params.rq_wq_type = rq_type;
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.mpwqe_log_stride_sz = priv->params.rx_cqe_compress ?
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+ MLX5_MPWRQ_LOG_STRIDE_SIZE;
+ priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ priv->params.mpwqe_log_stride_sz;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ }
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+
+ mlx5_core_info(priv->mdev,
+ "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(priv->params.log_rq_size),
+ BIT(priv->params.mpwqe_log_stride_sz),
+ priv->params.rx_cqe_compress_admin);
+}
+
+static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
+{
+ u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
+ !priv->xdp_prog ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
+ mlx5e_set_rq_type_params(priv, rq_type);
+}
+
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -136,6 +180,9 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
@@ -314,7 +361,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
struct mlx5_wqe_data_seg *dseg = &wqe->data;
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
@@ -342,21 +389,21 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
int i;
- rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
- GFP_KERNEL, cpu_to_node(c->cpu));
- if (!rq->wqe_info)
+ rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->mpwqe.info)
goto err_out;
/* We allocate more than mtt_sz as we will align the pointer */
- rq->mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+ rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
cpu_to_node(c->cpu));
- if (unlikely(!rq->mtt_no_align))
+ if (unlikely(!rq->mpwqe.mtt_no_align))
goto err_free_wqe_info;
for (i = 0; i < wq_sz; i++) {
- struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
- wi->umr.mtt = PTR_ALIGN(rq->mtt_no_align + i * mtt_alloc,
+ wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
MLX5_UMR_ALIGN);
wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
PCI_DMA_TODEVICE);
@@ -370,14 +417,14 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
err_unmap_mtts:
while (--i >= 0) {
- struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
PCI_DMA_TODEVICE);
}
- kfree(rq->mtt_no_align);
+ kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
- kfree(rq->wqe_info);
+ kfree(rq->mpwqe.info);
err_out:
return -ENOMEM;
@@ -390,13 +437,23 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
int i;
for (i = 0; i < wq_sz; i++) {
- struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
PCI_DMA_TODEVICE);
}
- kfree(rq->mtt_no_align);
- kfree(rq->wqe_info);
+ kfree(rq->mpwqe.mtt_no_align);
+ kfree(rq->mpwqe.info);
+}
+
+static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+ if (rep && rep->vport != FDB_UPLINK_VPORT)
+ return true;
+
+ return false;
}
static int mlx5e_create_rq(struct mlx5e_channel *c,
@@ -408,6 +465,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 byte_count;
+ u32 frag_sz;
+ int npages;
int wq_sz;
int err;
int i;
@@ -430,41 +489,66 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->priv = c->priv;
+ rq->xdp_prog = priv->xdp_prog;
+
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ if (rq->xdp_prog)
+ rq->buff.map_dir = DMA_BIDIRECTIONAL;
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ if (mlx5e_is_vf_vport_rep(priv)) {
+ err = -EINVAL;
+ goto err_rq_wq_destroy;
+ }
+
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->mpwqe_mtt_offset = c->ix *
+ rq->mpwqe.mtt_offset = c->ix *
MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
- rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
- byte_count = rq->wqe_sz;
+
+ rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+ byte_count = rq->buff.wqe_sz;
rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
err = mlx5e_rq_alloc_mpwqe_info(rq, c);
if (err)
goto err_rq_wq_destroy;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!rq->skb) {
+ rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->dma_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
+ if (mlx5e_is_vf_vport_rep(priv))
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
+ else
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
- rq->wqe_sz = (priv->params.lro_en) ?
+ rq->buff.wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
MLX5E_SW2HW_MTU(priv->netdev->mtu);
- rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
- byte_count = rq->wqe_sz;
+ byte_count = rq->buff.wqe_sz;
+
+ /* calc the required page order */
+ frag_sz = MLX5_RX_HEADROOM +
+ byte_count /* packet data */ +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frag_sz = SKB_DATA_ALIGN(frag_sz);
+
+ npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
+ rq->buff.page_order = order_base_2(npages);
+
byte_count |= MLX5_HW_START_PADDING;
rq->mkey_be = c->mkey_be;
}
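
The page-order calculation above sizes one RX fragment as headroom + packet data + the skb_shared_info tail, rounds that up to whole pages, and takes the next power of two. With typical x86-64 numbers (assumed: 4 KiB pages, ~64 B headroom, ~320 B shared info) an MTU-sized buffer stays order-0, while a 64 KiB LRO buffer lands at order 5:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	static int order_base_2(int n)	/* smallest o with (1 << o) >= n */
	{
		int o = 0;

		while ((1 << o) < n)
			o++;
		return o;
	}

	static int page_order(int byte_count)
	{
		int frag_sz = 64 + byte_count + 320; /* headroom + data + shinfo */

		return order_base_2(DIV_ROUND_UP(frag_sz, 4096));
	}

	int main(void)
	{
		printf("MTU frame:  order %d\n", page_order(1536));	/* 0 */
		printf("LRO buffer: order %d\n", page_order(65536));	/* 5 */
		return 0;
	}

This is also why the lro_wqe_sz initialization later in this patch subtracts MLX5_RX_HEADROOM and the skb_shared_info overhead: it keeps an LRO fragment from spilling into the next page order.
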
@@ -482,6 +566,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
+ if (rq->xdp_prog)
+ bpf_prog_add(rq->xdp_prog, 1);
+
return 0;
err_rq_wq_destroy:
@@ -494,12 +581,15 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
int i;
+ if (rq->xdp_prog)
+ bpf_prog_put(rq->xdp_prog);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_mpwqe_info(rq);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- kfree(rq->skb);
+ kfree(rq->dma_info);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
@@ -641,7 +731,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
/* UMR WQE (if in progress) is always at wq->head */
if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
- mlx5e_free_rx_mpwqe(rq, &rq->wqe_info[wq->head]);
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
@@ -676,8 +766,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (param->am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->ico_wqe_info[pi].num_wqebbs = 1;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
return 0;
@@ -701,26 +791,65 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
mlx5e_destroy_rq(rq);
}
-static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
{
- kfree(sq->wqe_info);
- kfree(sq->dma_fifo);
- kfree(sq->skb);
+ kfree(sq->db.xdp.di);
+ kfree(sq->db.xdp.wqe_info);
}
-static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
- sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
- sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
- numa);
- sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
- numa);
+ sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
+ GFP_KERNEL, numa);
+ sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
+ mlx5e_free_sq_xdp_db(sq);
+ return -ENOMEM;
+ }
- if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
- mlx5e_free_sq_db(sq);
+ return 0;
+}
+
+static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->db.ico_wqe);
+}
+
+static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
+{
+ u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+ sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.ico_wqe)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->db.txq.wqe_info);
+ kfree(sq->db.txq.dma_fifo);
+ kfree(sq->db.txq.skb);
+}
+
+static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
+ GFP_KERNEL, numa);
+ sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
+ GFP_KERNEL, numa);
+ sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
+ GFP_KERNEL, numa);
+ if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
+ mlx5e_free_sq_txq_db(sq);
return -ENOMEM;
}
@@ -729,6 +858,46 @@ static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
return 0;
}
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ mlx5e_free_sq_txq_db(sq);
+ break;
+ case MLX5E_SQ_ICO:
+ mlx5e_free_sq_ico_db(sq);
+ break;
+ case MLX5E_SQ_XDP:
+ mlx5e_free_sq_xdp_db(sq);
+ break;
+ }
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ return mlx5e_alloc_sq_txq_db(sq, numa);
+ case MLX5E_SQ_ICO:
+ return mlx5e_alloc_sq_ico_db(sq, numa);
+ case MLX5E_SQ_XDP:
+ return mlx5e_alloc_sq_xdp_db(sq, numa);
+ }
+
+ return 0;
+}
+
+static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
+{
+ switch (sq_type) {
+ case MLX5E_SQ_ICO:
+ return MLX5E_ICOSQ_MAX_WQEBBS;
+ case MLX5E_SQ_XDP:
+ return MLX5E_XDP_TX_WQEBBS;
+ }
+ return MLX5_SEND_WQE_MAX_WQEBBS;
+}
+
static int mlx5e_create_sq(struct mlx5e_channel *c,
int tc,
struct mlx5e_sq_param *param,
@@ -741,6 +910,13 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int err;
+ sq->type = param->type;
+ sq->pdev = c->pdev;
+ sq->tstamp = &priv->tstamp;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+
err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
@@ -769,18 +945,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- if (param->icosq) {
- u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-
- sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
- wq_sz,
- GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!sq->ico_wqe_info) {
- err = -ENOMEM;
- goto err_free_sq_db;
- }
- } else {
+ if (sq->type == MLX5E_SQ_TXQ) {
int txq_ix;
txq_ix = c->ix + tc * priv->params.num_channels;
@@ -788,19 +953,11 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
priv->txq_to_sq_map[txq_ix] = sq;
}
- sq->pdev = c->pdev;
- sq->tstamp = &priv->tstamp;
- sq->mkey_be = c->mkey_be;
- sq->channel = c;
- sq->tc = tc;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
return 0;
-err_free_sq_db:
- mlx5e_free_sq_db(sq);
-
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
@@ -815,7 +972,6 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
- kfree(sq->ico_wqe_info);
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -844,11 +1000,12 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
+ 0 : priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
+ MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -963,12 +1120,14 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
netif_tx_disable_queue(sq->txq);
/* last doorbell out, godspeed .. */
- if (mlx5e_sq_has_room_for(sq, 1))
+ if (mlx5e_sq_has_room_for(sq, 1)) {
+ sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
mlx5e_send_nop(sq, true);
+ }
}
mlx5e_disable_sq(sq);
- mlx5e_free_tx_descs(sq);
+ mlx5e_free_sq_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -1329,14 +1488,31 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
}
}
+ if (priv->xdp_prog) {
+ /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+ priv->params.tx_cq_moderation);
+ if (err)
+ goto err_close_sqs;
+
+ err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
+ if (err) {
+ mlx5e_close_cq(&c->xdp_sq.cq);
+ goto err_close_sqs;
+ }
+ }
+
+ c->xdp = !!priv->xdp_prog;
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
- goto err_close_sqs;
+ goto err_close_xdp_sq;
netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
*cp = c;
return 0;
+err_close_xdp_sq:
+ mlx5e_close_sq(&c->xdp_sq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1365,9 +1541,13 @@ err_napi_del:
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
+ if (c->xdp)
+ mlx5e_close_sq(&c->xdp_sq);
mlx5e_close_sqs(c);
mlx5e_close_sq(&c->icosq);
napi_disable(&c->napi);
+ if (c->xdp)
+ mlx5e_close_cq(&c->xdp_sq.cq);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
@@ -1441,6 +1621,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
param->max_inline = priv->params.tx_max_inline;
param->min_inline_mode = priv->params.tx_min_inline_mode;
+ param->type = MLX5E_SQ_TXQ;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1514,7 +1695,22 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
- param->icosq = true;
+ param->type = MLX5E_SQ_ICO;
+}
+
+static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+
+ param->max_inline = priv->params.tx_max_inline;
+ /* XDP SQs support only L2 inline mode */
+ param->min_inline_mode = MLX5_INLINE_MODE_NONE;
+ param->type = MLX5E_SQ_XDP;
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
@@ -1523,6 +1719,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_chan
mlx5e_build_rq_param(priv, &cparam->rq);
mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
@@ -2720,11 +2917,15 @@ static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}
-static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
vlan, qos);
}
@@ -2901,6 +3102,106 @@ static void mlx5e_tx_timeout(struct net_device *dev)
schedule_work(&priv->tx_timeout_work);
}
+static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct bpf_prog *old_prog;
+ int err = 0;
+ bool reset, was_opened;
+ int i;
+
+ mutex_lock(&priv->state_lock);
+
+ if ((netdev->features & NETIF_F_LRO) && prog) {
+ netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ /* no need for full reset when exchanging programs */
+ reset = (!priv->xdp_prog || !prog);
+
+ if (was_opened && reset)
+ mlx5e_close_locked(netdev);
+
+ /* exchange programs */
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (prog)
+ bpf_prog_add(prog, 1);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (reset) /* change RQ type according to priv->xdp_prog */
+ mlx5e_set_rq_priv_params(priv);
+
+ if (was_opened && reset)
+ mlx5e_open_locked(netdev);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
+ goto unlock;
+
+ /* Exchanging programs without a reset: update the ref counts
+ * on behalf of the channels' RQs here.
+ */
+ bpf_prog_add(prog, priv->params.num_channels);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ struct mlx5e_channel *c = priv->channel[i];
+
+ set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ napi_synchronize(&c->napi);
+ /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
+
+ old_prog = xchg(&c->rq.xdp_prog, prog);
+
+ clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ /* napi_schedule in case we have missed anything */
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+ napi_schedule(&c->napi);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
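
mlx5e_xdp_set() above distinguishes two cases: attaching or detaching a program changes the RQ type and needs a full close/open, while replacing one program with another hot-swaps the pointer per channel. The swap sequence, condensed from the function with its safety reasoning spelled out:

	bpf_prog_add(prog, nch);		/* one ref per channel RQ, up front */
	for (i = 0; i < nch; i++) {
		set_bit(MLX5E_RQ_STATE_FLUSH, &c[i]->rq.state);
		napi_synchronize(&c[i]->napi);	/* no RX polling still in flight  */
		old = xchg(&c[i]->rq.xdp_prog, prog);
		clear_bit(MLX5E_RQ_STATE_FLUSH, &c[i]->rq.state);
		napi_schedule(&c[i]->napi);	/* pick up anything we parked     */
		if (old)
			bpf_prog_put(old);
	}

Parking the RQ via the FLUSH bit plus napi_synchronize() guarantees no CPU is still dereferencing the old program when its reference is dropped.
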
+static bool mlx5e_xdp_attached(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return !!priv->xdp_prog;
+}
+
+static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mlx5e_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = mlx5e_xdp_attached(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
+ * reenabling interrupts.
+ */
+static void mlx5e_netpoll(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ napi_schedule(&priv->channel[i]->napi);
+}
+#endif
+
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@@ -2920,6 +3221,10 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
+ .ndo_xdp = mlx5e_xdp,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx5e_netpoll,
+#endif
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2951,6 +3256,10 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_tx_timeout = mlx5e_tx_timeout,
+ .ndo_xdp = mlx5e_xdp,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mlx5e_netpoll,
+#endif
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -3025,13 +3334,6 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
indirection_rqt[i] = i % num_channels;
}
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, striding_rq) &&
- MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
- MLX5_CAP_ETH(mdev, reg_umr_sq);
-}
-
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
enum pcie_link_width width;
@@ -3111,11 +3413,13 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->params.log_sq_size =
- MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_LINKED_LIST;
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
priv->params.rx_cqe_compress_admin = false;
@@ -3128,33 +3432,11 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.rx_cqe_compress_admin =
cqe_compress_heuristic(link_speed, pci_bw);
}
-
priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
- switch (priv->params.rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- priv->params.mpwqe_log_stride_sz =
- priv->params.rx_cqe_compress ?
- MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
- MLX5_MPWRQ_LOG_STRIDE_SIZE;
- priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- priv->params.mpwqe_log_stride_sz;
+ mlx5e_set_rq_priv_params(priv);
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
priv->params.lro_en = true;
- break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- }
-
- mlx5_core_info(mdev,
- "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(priv->params.log_rq_size),
- BIT(priv->params.mpwqe_log_stride_sz),
- priv->params.rx_cqe_compress_admin);
-
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
@@ -3174,19 +3456,16 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
- priv->params.lro_wqe_sz =
- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
+ /* Extra room needed for build_skb */
+ MLX5_RX_HEADROOM -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Initialize pflags */
MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->params.num_channels = profile->max_nch(mdev);
- priv->profile = profile;
- priv->ppriv = ppriv;
-
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_ets_init(priv);
#endif
@@ -3490,9 +3769,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
rep.load = mlx5e_nic_rep_load;
rep.unload = mlx5e_nic_rep_unload;
- rep.vport = 0;
+ rep.vport = FDB_UPLINK_VPORT;
rep.priv_data = priv;
- mlx5_eswitch_register_vport_rep(esw, &rep);
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep);
}
}
@@ -3631,7 +3910,7 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
rep.unload = mlx5e_vport_rep_unload;
rep.vport = vport;
ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, &rep);
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index dc8677933f76..c6de6fba5843 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"
+#include "eswitch.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -179,50 +180,99 @@ unlock:
mutex_unlock(&priv->state_lock);
}
-int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
+
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
{
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+ struct mlx5e_page_cache *cache = &rq->page_cache;
+ u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
- skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
- if (unlikely(!skb))
- return -ENOMEM;
+ if (tail_next == cache->head) {
+ rq->stats.cache_full++;
+ return false;
+ }
- dma_addr = dma_map_single(rq->pdev,
- /* hw start padding */
- skb->data,
- /* hw end padding */
- rq->wqe_sz,
- DMA_FROM_DEVICE);
+ cache->page_cache[cache->tail] = *dma_info;
+ cache->tail = tail_next;
+ return true;
+}
- if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
- goto err_free_skb;
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct mlx5e_page_cache *cache = &rq->page_cache;
- *((dma_addr_t *)skb->cb) = dma_addr;
- wqe->data.addr = cpu_to_be64(dma_addr);
+ if (unlikely(cache->head == cache->tail)) {
+ rq->stats.cache_empty++;
+ return false;
+ }
- rq->skb[ix] = skb;
+ if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+ rq->stats.cache_busy++;
+ return false;
+ }
+
+ *dma_info = cache->page_cache[cache->head];
+ cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
+ rq->stats.cache_reuse++;
+
+ dma_sync_single_for_device(rq->pdev, dma_info->addr,
+ RQ_PAGE_SIZE(rq),
+ DMA_FROM_DEVICE);
+ return true;
+}
+
+static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct page *page;
+
+ if (mlx5e_rx_cache_get(rq, dma_info))
+ return 0;
+
+ page = dev_alloc_pages(rq->buff.page_order);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ dma_info->page = page;
+ dma_info->addr = dma_map_page(rq->pdev, page, 0,
+ RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+ if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
+ put_page(page);
+ return -ENOMEM;
+ }
return 0;
+}
-err_free_skb:
- dev_kfree_skb(skb);
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+ bool recycle)
+{
+ if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
+ return;
+
+ dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+ rq->buff.map_dir);
+ put_page(dma_info->page);
+}
+
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+ if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
+ return -ENOMEM;
- return -ENOMEM;
+ wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+ return 0;
}
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
- struct sk_buff *skb = rq->skb[ix];
+ struct mlx5e_dma_info *di = &rq->dma_info[ix];
- if (skb) {
- rq->skb[ix] = NULL;
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- }
+ mlx5e_page_release(rq, di, true);
}
static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
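
The page cache moved and reused above is a power-of-two ring: head and tail are masked with MLX5E_CACHE_SIZE - 1, one slot is deliberately kept empty so full can be told apart from empty, and a page is only handed back out when page_ref_count() == 1, i.e. the network stack has dropped every reference. The index arithmetic, demonstrated standalone (cache size here is illustrative):

	#include <stdio.h>

	#define CACHE_SIZE 8	/* power of two, like MLX5E_CACHE_SIZE */

	struct ring { int head, tail; };

	static int ring_put(struct ring *r)
	{
		int tail_next = (r->tail + 1) & (CACHE_SIZE - 1);

		if (tail_next == r->head)
			return 0;	/* full: the sacrificial slot */
		r->tail = tail_next;
		return 1;
	}

	static int ring_get(struct ring *r)
	{
		if (r->head == r->tail)
			return 0;	/* empty */
		r->head = (r->head + 1) & (CACHE_SIZE - 1);
		return 1;
	}

	int main(void)
	{
		struct ring r = { 0, 0 };
		int i, stored = 0;

		for (i = 0; i < CACHE_SIZE; i++)
			stored += ring_put(&r);
		printf("stored %d of %d\n", stored, CACHE_SIZE);	/* 7 of 8 */
		return 0;
	}
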
@@ -279,7 +329,7 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
struct mlx5e_sq *sq = &rq->channel->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *wqe;
@@ -288,8 +338,8 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
/* fill sq edge with nops to avoid wqe wrap around */
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->ico_wqe_info[pi].num_wqebbs = 1;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true);
}
@@ -299,90 +349,17 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
- sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+ sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
sq->pc += num_wqebbs;
mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- struct mlx5e_page_cache *cache = &rq->page_cache;
- u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
-
- if (tail_next == cache->head) {
- rq->stats.cache_full++;
- return false;
- }
-
- cache->page_cache[cache->tail] = *dma_info;
- cache->tail = tail_next;
- return true;
-}
-
-static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- struct mlx5e_page_cache *cache = &rq->page_cache;
-
- if (unlikely(cache->head == cache->tail)) {
- rq->stats.cache_empty++;
- return false;
- }
-
- if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
- rq->stats.cache_busy++;
- return false;
- }
-
- *dma_info = cache->page_cache[cache->head];
- cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
- rq->stats.cache_reuse++;
-
- dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
- DMA_FROM_DEVICE);
- return true;
-}
-
-static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- struct page *page;
-
- if (mlx5e_rx_cache_get(rq, dma_info))
- return 0;
-
- page = dev_alloc_page();
- if (unlikely(!page))
- return -ENOMEM;
-
- dma_info->page = page;
- dma_info->addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
- put_page(page);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
- bool recycle)
-{
- if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
- return;
-
- dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, DMA_FROM_DEVICE);
- put_page(dma_info->page);
-}
-
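The page-cache helpers removed here are relocated earlier in the file by this patch; their head/tail arithmetic is the usual power-of-two ring with one slot sacrificed so that full and empty are distinguishable. A minimal stand-alone model of that arithmetic (names and the stdbool dependency are illustrative, not the driver's):

	#include <stdbool.h>

	#define CACHE_SIZE 64			/* must be a power of two */

	struct cache_model {
		unsigned int head, tail;	/* kept masked to CACHE_SIZE - 1 */
	};

	static bool cache_put_would_fail(const struct cache_model *c)
	{
		/* advancing tail onto head would make "full" look like "empty" */
		return ((c->tail + 1) & (CACHE_SIZE - 1)) == c->head;
	}

	static bool cache_is_empty(const struct cache_model *c)
	{
		return c->head == c->tail;
	}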
static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
struct mlx5e_rx_wqe *wqe,
u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
int err;
@@ -436,7 +413,7 @@ void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
- mlx5e_free_rx_mpwqe(rq, &rq->wqe_info[wq->head]);
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
return;
}
@@ -448,7 +425,7 @@ void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
mlx5_wq_ll_update_db_record(wq);
}
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
int err;
@@ -462,7 +439,7 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
mlx5e_free_rx_mpwqe(rq, wi);
}
@@ -653,40 +630,207 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
rq->stats.packets++;
rq->stats.bytes += cqe_bcnt;
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
- napi_gro_receive(rq->cq.napi, skb);
+}
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
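A note on the `& wq->sz_m1` arithmetic used here and throughout the file: the producer counter is free-running and the ring size is a power of two, so masking replaces a modulo, and stepping back by MLX5E_XDP_TX_WQEBBS recovers the slot of the most recently posted WQE. A hedged sketch of the idiom:

	/* sz_m1 == ring_size - 1, with ring_size a power of two */
	static inline unsigned int slot(unsigned int pc, unsigned int sz_m1)
	{
		return pc & sz_m1;	/* pc % ring_size without a division */
	}

	/* slot of the last posted entry when each entry occupies n slots */
	static inline unsigned int last_slot(unsigned int pc, unsigned int n,
					     unsigned int sz_m1)
	{
		return (pc - n) & sz_m1; /* unsigned wrap-around is defined */
	}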
+
+static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di,
+ unsigned int data_offset,
+ int len)
+{
+ struct mlx5e_sq *sq = &rq->channel->xdp_sq;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi];
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
+ unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
+ void *data = page_address(di->page) + data_offset;
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
+ if (sq->db.xdp.doorbell) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->db.xdp.doorbell = false;
+ }
+ rq->stats.xdp_tx_full++;
+ mlx5e_page_release(rq, di, true);
+ return;
+ }
+
+ dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
+ PCI_DMA_TODEVICE);
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* copy the inline part */
+ memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
+
+ /* write the dma part */
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+ dseg->lkey = sq->mkey_be;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
+
+ sq->db.xdp.di[pi] = *di;
+ wi->opcode = MLX5_OPCODE_SEND;
+ wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
+ sq->pc += MLX5E_XDP_TX_WQEBBS;
+
+ sq->db.xdp.doorbell = true;
+ rq->stats.xdp_tx++;
+}
+
+/* returns true if packet was consumed by xdp */
+static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ const struct bpf_prog *prog,
+ struct mlx5e_dma_info *di,
+ void *data, u16 len)
+{
+ struct xdp_buff xdp;
+ u32 act;
+
+ if (!prog)
+ return false;
+
+ xdp.data = data;
+ xdp.data_end = xdp.data + len;
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ rq->stats.xdp_drop++;
+ mlx5e_page_release(rq, di, true);
+ return true;
+ }
+}
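For orientation, a minimal XDP program this hook could run; loading (SEC annotations, license, and attachment via the netlink XDP path) is left to the loader and not shown:

	#include <linux/bpf.h>

	/* Every frame takes the XDP_PASS branch above and continues into
	 * skb_from_cqe(); returning XDP_TX here instead would bounce the
	 * frame back out through mlx5e_xmit_xdp_frame().
	 */
	int xdp_pass_all(struct xdp_md *ctx)
	{
		return XDP_PASS;
	}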
+
+static inline
+struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ u16 wqe_counter, u32 cqe_bcnt)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
+ struct mlx5e_dma_info *di;
+ struct sk_buff *skb;
+ void *va, *data;
+
+ di = &rq->dma_info[wqe_counter];
+ va = page_address(di->page);
+ data = va + MLX5_RX_HEADROOM;
+
+ dma_sync_single_range_for_cpu(rq->pdev,
+ di->addr,
+ MLX5_RX_HEADROOM,
+ rq->buff.wqe_sz,
+ DMA_FROM_DEVICE);
+ prefetch(data);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ mlx5e_page_release(rq, di, true);
+ return NULL;
+ }
+
+ if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
+ return NULL; /* page/packet was consumed by XDP */
+
+ skb = build_skb(va, RQ_PAGE_SIZE(rq));
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ mlx5e_page_release(rq, di, true);
+ return NULL;
+ }
+
+ /* queue up for recycling ... */
+ page_ref_inc(di->page);
+ mlx5e_page_release(rq, di, true);
+
+ skb_reserve(skb, MLX5_RX_HEADROOM);
+ skb_put(skb, cqe_bcnt);
+
+ return skb;
}
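The two lines under "queue up for recycling" are the heart of the page-reuse scheme; in comment form:

	/* build_skb() hands ownership of one page reference to the new SKB.
	 * Taking a second reference and immediately "releasing" it parks the
	 * still-live page in the RQ page cache, so once the stack frees the
	 * SKB the page can be handed out again by mlx5e_rx_cache_get()
	 * without a fresh allocation or DMA mapping.
	 */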
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5e_rx_wqe *wqe;
- struct sk_buff *skb;
__be16 wqe_counter_be;
+ struct sk_buff *skb;
u16 wqe_counter;
u32 cqe_bcnt;
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- skb = rq->skb[wqe_counter];
- prefetch(skb->data);
- rq->skb[wqe_counter] = NULL;
-
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
- dev_kfree_skb(skb);
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
goto wq_ll_pop;
- }
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb_put(skb, cqe_bcnt);
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct net_device *netdev = rq->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rx_wqe *wqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
+ goto wq_ll_pop;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (rep->vlan && skb_vlan_tag_present(skb))
+ skb_vlan_pop(skb);
+
+ napi_gro_receive(rq->cq.napi, skb);
+
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
@@ -734,7 +878,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
struct sk_buff *skb;
u16 cqe_bcnt;
@@ -764,6 +908,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
@@ -776,6 +921,7 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
int work_done = 0;
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
@@ -802,6 +948,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
}
+ if (xdp_sq->db.xdp.doorbell) {
+ mlx5e_xmit_xdp_doorbell(xdp_sq);
+ xdp_sq->db.xdp.doorbell = false;
+ }
+
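The deferred flush is what keeps the per-frame TX path above cheap:

	/* One MMIO doorbell per NAPI poll: mlx5e_xmit_xdp_frame() only sets
	 * sq->db.xdp.doorbell, and this single write at the end of the CQ
	 * poll covers every XDP_TX frame queued during the iteration.
	 */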
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 6af8d79e8c2a..57452fdc5154 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -65,6 +65,9 @@ struct mlx5e_sw_stats {
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_full;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
@@ -100,6 +103,9 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -278,6 +284,9 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ u64 xdp_tx_full;
u64 wqe_err;
u64 mpwqe_filler;
u64 buff_alloc_err;
@@ -295,6 +304,9 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
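With the descriptors above in place, the new counters surface through the standard stats path; presumably `ethtool -S <netdev>` would then list rx_xdp_drop, rx_xdp_tx and rx_xdp_tx_full both per ring and in the software totals (command shown for illustration only).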
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 22cfc4ac1837..ce8c54d18906 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,6 +39,7 @@
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -47,6 +48,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
struct mlx5_flow_rule *rule;
+ struct mlx5_esw_flow_attr *attr;
};
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
@@ -114,27 +116,30 @@ err_create_ft:
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- u32 action, u32 dst_vport)
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
- u32 src_vport;
+ int err;
- if (rep->vport) /* set source vport for the flow */
- src_vport = rep->vport;
- else
- src_vport = FDB_UPLINK_VPORT;
+ err = mlx5_eswitch_add_vlan_action(esw, attr);
+ if (err)
+ return ERR_PTR(err);
- return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_rule *rule)
+ struct mlx5_flow_rule *rule,
+ struct mlx5_esw_flow_attr *attr)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_fc *counter = NULL;
counter = mlx5_flow_rule_counter(rule);
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ mlx5_eswitch_del_vlan_action(esw, attr);
+
mlx5_del_flow_rule(rule);
mlx5_fc_destroy(priv->mdev, counter);
@@ -159,6 +164,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
@@ -222,6 +228,24 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
key->src);
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+ if (mask->vlan_id) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+ }
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
@@ -361,7 +385,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *dest_vport)
+ struct mlx5_esw_flow_attr *attr)
{
const struct tc_action *a;
LIST_HEAD(actions);
@@ -369,17 +393,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (tc_no_actions(exts))
return -EINVAL;
- *action = 0;
+ memset(attr, 0, sizeof(*attr));
+ attr->in_rep = priv->ppriv;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- /* Only support a single action per rule */
- if (*action)
- return -EINVAL;
-
if (is_tcf_gact_shot(a)) {
- *action = MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -387,7 +408,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
- struct mlx5_eswitch_rep *out_rep;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -397,13 +417,22 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
out_priv = netdev_priv(out_dev);
- out_rep = out_priv->ppriv;
- if (out_rep->vport == 0)
- *dest_vport = FDB_UPLINK_VPORT;
- else
- *dest_vport = out_rep->vport;
- *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->out_rep = out_priv->ppriv;
+ continue;
+ }
+
+ if (is_tcf_vlan(a)) {
+ if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
+ if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ attr->vlan = tcf_vlan_push_vid(a);
+ }
continue;
}
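The vlan match key and the push/pop actions parsed here line up with tc flower offload; an illustrative rule (exact syntax depends on the iproute2 version, and the device names are placeholders) might be `tc filter add dev <vf_rep> ingress protocol 802.1Q flower vlan_id 100 action vlan pop action mirred egress redirect dev <uplink_rep>`.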
@@ -417,18 +446,29 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err = 0;
- u32 flow_tag, action, dest_vport = 0;
+ bool fdb_flow = false;
+ u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL;
+ struct mlx5_esw_flow_attr *old_attr = NULL;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ fdb_flow = true;
+
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
- if (flow)
+ if (flow) {
old = flow->rule;
- else
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ old_attr = flow->attr;
+ } else {
+ if (fdb_flow)
+ flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
+ GFP_KERNEL);
+ else
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ }
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
@@ -442,11 +482,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (err < 0)
goto err_free;
- if (esw && esw->mode == SRIOV_OFFLOADS) {
- err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
+ if (fdb_flow) {
+ flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
+ err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
} else {
err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
if (err < 0)
@@ -465,7 +506,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto err_del_rule;
if (old)
- mlx5e_tc_del_flow(priv, old);
+ mlx5e_tc_del_flow(priv, old, old_attr);
goto out;
@@ -493,7 +534,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
- mlx5e_tc_del_flow(priv, flow->rule);
+ mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
kfree(flow);
@@ -550,7 +591,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
struct mlx5e_tc_flow *flow = ptr;
struct mlx5e_priv *priv = arg;
- mlx5e_tc_del_flow(priv, flow->rule);
+ mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
kfree(flow);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index eb0e72537f10..70a717382357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -52,7 +52,6 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
- sq->skb[pi] = NULL;
sq->pc++;
sq->stats.nop++;
@@ -82,15 +81,17 @@ static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
u32 size,
enum mlx5e_dma_map_type map_type)
{
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
+ u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
+
+ sq->db.txq.dma_fifo[i].addr = addr;
+ sq->db.txq.dma_fifo[i].size = size;
+ sq->db.txq.dma_fifo[i].type = map_type;
sq->dma_fifo_pc++;
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
- return &sq->dma_fifo[i & sq->dma_fifo_mask];
+ return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
@@ -221,7 +222,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];
+ struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
@@ -341,7 +342,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->skb[pi] = skb;
+ sq->db.txq.skb[pi] = skb;
wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->pc += wi->num_wqebbs;
@@ -368,8 +369,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
/* fill sq edge with nops to avoid wqe wrap around */
- while ((sq->pc & wq->sz_m1) > sq->edge)
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->db.txq.skb[pi] = NULL;
mlx5e_send_nop(sq, false);
+ }
if (bf)
sq->bf_budget--;
@@ -442,8 +445,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
last_wqe = (sqcc == wqe_counter);
ci = sqcc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
+ skb = sq->db.txq.skb[ci];
+ wi = &sq->db.txq.wqe_info[ci];
if (unlikely(!skb)) { /* nop */
sqcc++;
@@ -492,7 +495,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -501,8 +504,8 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
+ skb = sq->db.txq.skb[ci];
+ wi = &sq->db.txq.wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
@@ -520,3 +523,37 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
sq->cc += wi->num_wqebbs;
}
}
+
+static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ di = &sq->db.xdp.di[ci];
+ wi = &sq->db.xdp.wqe_info[ci];
+
+ if (wi->opcode == MLX5_OPCODE_NOP) {
+ sq->cc++;
+ continue;
+ }
+
+ sq->cc += wi->num_wqebbs;
+
+ mlx5e_page_release(&sq->channel->rq, di, false);
+ }
+}
+
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ mlx5e_free_txq_sq_descs(sq);
+ break;
+ case MLX5E_SQ_XDP:
+ mlx5e_free_xdp_sq_descs(sq);
+ break;
+ }
+}
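Why recycle is false only in this teardown path:

	/* The XDP SQ descriptors reference RX pages rather than SKBs.  At
	 * teardown the RQ (and its page cache) may itself be going away,
	 * so the pages are presumably best unmapped and freed outright --
	 * recycle == false -- instead of being parked for reuse as the
	 * completion path does.
	 */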
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 08d8b0c91f07..5703f19a6a24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -72,7 +72,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
do {
u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
- struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+ struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
mlx5_cqwq_pop(&cq->wq);
sqcc += icowi->num_wqebbs;
@@ -105,6 +105,66 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sq->cc = sqcc;
}
+static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_sq, cq);
+
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return false;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ u16 wqe_counter;
+ bool last_wqe;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = sqcc & sq->wq.sz_m1;
+ di = &sq->db.xdp.di[ci];
+ wi = &sq->db.xdp.wqe_info[ci];
+
+ if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
+ sqcc++;
+ continue;
+ }
+
+ sqcc += wi->num_wqebbs;
+ /* Recycle RX page */
+ mlx5e_page_release(&sq->channel->rq, di, true);
+ } while (!last_wqe);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -121,6 +181,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
+ if (c->xdp)
+ busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
+
mlx5e_poll_ico_cq(&c->icosq.cq);
busy |= mlx5e_post_rx_wqes(&c->rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 654b76ff962f..abbf2c369923 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -81,9 +81,6 @@ enum {
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
-
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
@@ -130,7 +127,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
- u16 vlan, u8 qos, bool set)
+ u16 vlan, u8 qos, u8 set_flags)
{
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
@@ -138,14 +135,18 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
return -ENOTSUPP;
- esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
- vport, vlan, qos, set);
- if (set) {
+ esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
+ vport, vlan, qos, set_flags);
+
+ if (set_flags & SET_VLAN_STRIP)
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_strip, 1);
+
+ if (set_flags & SET_VLAN_INSERT) {
/* insert only if no vlan in packet */
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_insert, 1);
+
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.cvlan_pcp, qos);
MLX5_SET(modify_esw_vport_context_in, in,
@@ -1492,6 +1493,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
abort:
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+ esw->mode = SRIOV_NONE;
return err;
}
@@ -1780,25 +1782,21 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
return 0;
}
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
- int vport, u16 vlan, u8 qos)
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos, u8 set_flags)
{
struct mlx5_vport *evport;
int err = 0;
- int set = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
return -EINVAL;
- if (vlan || qos)
- set = 1;
-
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
goto unlock;
@@ -1816,6 +1814,17 @@ unlock:
return err;
}
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos)
+{
+ u8 set_flags = 0;
+
+ if (vlan || qos)
+ set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+ return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+}
+
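The legacy entry point above now degenerates to the flags-based helper, and callers can request just one of the two behaviors; the offloads code below, for instance, applies a strip-only policy:

	/* strip on receive without configuring insertion on send */
	__mlx5_eswitch_set_vport_vlan(esw, vport, 0, 0, SET_VLAN_STRIP);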
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 6855783f3bb3..2e2938e08cda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -157,6 +157,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_rule *miss_rule;
+ int vlan_push_pop_refcount;
} offloads;
};
};
@@ -178,11 +179,14 @@ struct mlx5_eswitch_rep {
void (*unload)(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep);
u16 vport;
- struct mlx5_flow_rule *vport_rx_rule;
+ u8 hw_id[ETH_ALEN];
void *priv_data;
+
+ struct mlx5_flow_rule *vport_rx_rule;
struct list_head vport_sqs_list;
+ u16 vlan;
+ u32 vlan_refcount;
bool valid;
- u8 hw_id[ETH_ALEN];
};
struct mlx5_esw_offload {
@@ -209,6 +213,9 @@ struct mlx5_eswitch {
int mode;
};
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
@@ -234,14 +241,32 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats);
struct mlx5_flow_spec;
+struct mlx5_esw_flow_attr;
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- u32 action, u32 src_vport, u32 dst_vport);
+ struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_rule *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+enum {
+ SET_VLAN_STRIP = BIT(0),
+ SET_VLAN_INSERT = BIT(1)
+};
+
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
+
+struct mlx5_esw_flow_attr {
+ struct mlx5_eswitch_rep *in_rep;
+ struct mlx5_eswitch_rep *out_rep;
+
+ int action;
+ u16 vlan;
+ bool vlan_handled;
+};
+
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
u16 *sqns_array, int sqns_num);
@@ -251,9 +276,17 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index,
struct mlx5_eswitch_rep *rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport);
+ int vport_index);
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr);
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr);
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos, u8 set_flags);
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 3dc83a9459a4..c55ad8d00c05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -46,19 +46,22 @@ enum {
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- u32 action, u32 src_vport, u32 dst_vport)
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest = { 0 };
struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule;
void *misc;
+ int action;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ action = attr->action;
+
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = dst_vport;
+ dest.vport_num = attr->out_rep->vport;
action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
@@ -69,7 +72,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -86,6 +89,186 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
}
+static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vf_vport, err = 0;
+
+ esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
+ for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
+ rep = &esw->offloads.vport_reps[vf_vport];
+ if (!rep->valid)
+ continue;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static struct mlx5_eswitch_rep *
+esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
+{
+ struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
+
+ in_rep = attr->in_rep;
+ out_rep = attr->out_rep;
+
+ if (push)
+ vport = in_rep;
+ else if (pop)
+ vport = out_rep;
+ else
+ vport = in_rep;
+
+ return vport;
+}
+
+static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
+ bool push, bool pop, bool fwd)
+{
+ struct mlx5_eswitch_rep *in_rep, *out_rep;
+
+ if ((push || pop) && !fwd)
+ goto out_notsupp;
+
+ in_rep = attr->in_rep;
+ out_rep = attr->out_rep;
+
+ if (push && in_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ if (pop && out_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ /* vport has vlan push configured, can't offload VF --> wire rules without it */
+ if (!push && !pop && fwd)
+ if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ /* protects against (1) setting rules with different vlans to push and
+ * (2) setting rules without a vlan (attr->vlan = 0) alongside rules
+ * that push a vlan (attr->vlan != 0)
+ */
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+ goto out_notsupp;
+
+ return 0;
+
+out_notsupp:
+ return -ENOTSUPP;
+}
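In list form, what the check above admits or rejects (a summary, not new logic):

	/* push or pop without a forward action	-> rejected
	 * push on traffic from the uplink	-> rejected
	 * pop on traffic toward the uplink	-> rejected
	 * plain VF -> uplink forward		-> rejected if in_rep has a
	 *					   vlan push configured
	 * push with a new vid			-> rejected while a different
	 *					   vid is still referenced
	 */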
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_eswitch_rep *vport = NULL;
+ bool push, pop, fwd;
+ int err = 0;
+
+ push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+ pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+ err = esw_add_vlan_action_check(attr, push, pop, fwd);
+ if (err)
+ return err;
+
+ attr->vlan_handled = false;
+
+ vport = esw_vlan_action_get_vport(attr, push, pop);
+
+ if (!push && !pop && fwd) {
+ /* tracks VF --> wire rules without vlan push action */
+ if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
+ vport->vlan_refcount++;
+ attr->vlan_handled = true;
+ }
+
+ return 0;
+ }
+
+ if (!push && !pop)
+ return 0;
+
+ if (!(offloads->vlan_push_pop_refcount)) {
+ /* it's the 1st vlan rule, apply global vlan pop policy */
+ err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ }
+ offloads->vlan_push_pop_refcount++;
+
+ if (push) {
+ if (vport->vlan_refcount)
+ goto skip_set_push;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+ SET_VLAN_INSERT | SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ vport->vlan = attr->vlan;
+skip_set_push:
+ vport->vlan_refcount++;
+ }
+out:
+ if (!err)
+ attr->vlan_handled = true;
+ return err;
+}
+
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_eswitch_rep *vport = NULL;
+ bool push, pop, fwd;
+ int err = 0;
+
+ if (!attr->vlan_handled)
+ return 0;
+
+ push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+ pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+ vport = esw_vlan_action_get_vport(attr, push, pop);
+
+ if (!push && !pop && fwd) {
+ /* tracks VF --> wire rules without vlan push action */
+ if (attr->out_rep->vport == FDB_UPLINK_VPORT)
+ vport->vlan_refcount--;
+
+ return 0;
+ }
+
+ if (push) {
+ vport->vlan_refcount--;
+ if (vport->vlan_refcount)
+ goto skip_unset_push;
+
+ vport->vlan = 0;
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
+ 0, 0, SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ }
+
+skip_unset_push:
+ offloads->vlan_push_pop_refcount--;
+ if (offloads->vlan_push_pop_refcount)
+ return 0;
+
+ /* no more vlan rules, stop global vlan pop policy */
+ err = esw_set_global_vlan_pop(esw, 0);
+
+out:
+ return err;
+}
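The add/del pair is refcount-symmetric:

	/* attr->vlan_handled records that the add path really took its
	 * references, so a failed or partially applied add is never undone
	 * twice, and the global pop policy is only withdrawn once
	 * vlan_push_pop_refcount drops back to zero.
	 */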
+
static struct mlx5_flow_rule *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
@@ -144,16 +327,12 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
{
struct mlx5_flow_rule *flow_rule;
struct mlx5_esw_sq *esw_sq;
- int vport;
int err;
int i;
if (esw->mode != SRIOV_OFFLOADS)
return 0;
- vport = rep->vport == 0 ?
- FDB_UPLINK_VPORT : rep->vport;
-
for (i = 0; i < sqns_num; i++) {
esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
if (!esw_sq) {
@@ -163,7 +342,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
/* Add re-inject rule to the PF/representor sqs */
flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- vport,
+ rep->vport,
sqns_array[i]);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -446,7 +625,7 @@ out:
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
- int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
if (esw->mode != SRIOV_LEGACY) {
esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
@@ -455,8 +634,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
- if (err)
- esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ }
return err;
}
@@ -508,12 +691,16 @@ create_ft_err:
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
- int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
- if (err)
- esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+ }
return err;
}
@@ -612,27 +799,36 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
}
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+ int vport_index,
+ struct mlx5_eswitch_rep *__rep)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ rep = &offloads->vport_reps[vport_index];
+
+ memset(rep, 0, sizeof(*rep));
- memcpy(&offloads->vport_reps[rep->vport], rep,
- sizeof(struct mlx5_eswitch_rep));
+ rep->load = __rep->load;
+ rep->unload = __rep->unload;
+ rep->vport = __rep->vport;
+ rep->priv_data = __rep->priv_data;
+ ether_addr_copy(rep->hw_id, __rep->hw_id);
- INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
- offloads->vport_reps[rep->vport].valid = true;
+ INIT_LIST_HEAD(&rep->vport_sqs_list);
+ rep->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport)
+ int vport_index)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
- rep = &offloads->vport_reps[vport];
+ rep = &offloads->vport_reps[vport_index];
- if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
+ if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
rep->unload(esw, rep);
- offloads->vport_reps[vport].valid = false;
+ rep->valid = false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7a0415e6d339..113c32326333 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -401,11 +401,11 @@ struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
{
struct mlx5_cmd_fc_bulk *b;
- int outlen = sizeof(*b) +
+ int outlen =
MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter) * num;
- b = kzalloc(outlen, GFP_KERNEL);
+ b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
if (!b)
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 068ee65a960b..aa33d58b9f81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1100,10 +1100,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_alloc_stats;
}
- if (mlxsw_driver->profile->used_max_lag &&
- mlxsw_driver->profile->used_max_port_per_lag) {
- alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
- mlxsw_driver->profile->max_port_per_lag;
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->resources);
+ if (err)
+ goto err_bus_init;
+
+ if (mlxsw_core->resources.max_lag_valid &&
+ mlxsw_core->resources.max_ports_in_lag_valid) {
+ alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
+ mlxsw_core->resources.max_ports_in_lag;
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
err = -ENOMEM;
@@ -1111,11 +1116,6 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
- &mlxsw_core->resources);
- if (err)
- goto err_bus_init;
-
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -1146,10 +1146,10 @@ err_hwmon_init:
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
- mlxsw_bus->fini(bus_priv);
-err_bus_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
devlink_free(devlink);
@@ -1615,7 +1615,7 @@ EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
- return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
+ return mlxsw_core->resources.max_ports_in_lag * lag_id +
port_index;
}
@@ -1644,7 +1644,7 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
{
int i;
- for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
+ for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index d2e32979319c..c4f550b6f783 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -179,8 +179,6 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
- used_max_lag:1,
- used_max_port_per_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -192,10 +190,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
- used_kvd_sizes:1;
+ used_kvd_split_data:1; /* indicates the KVD hash split values are set */
+
u8 max_vepa_channels;
- u16 max_lag;
- u16 max_port_per_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -214,8 +211,9 @@ struct mlxsw_config_profile {
u16 adaptive_routing_group_cap;
u8 arn;
u32 kvd_linear_size;
- u32 kvd_hash_single_size;
- u32 kvd_hash_double_size;
+ u16 kvd_hash_granularity;
+ u8 kvd_hash_single_parts;
+ u8 kvd_hash_double_parts;
u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -269,8 +267,35 @@ struct mlxsw_driver {
};
struct mlxsw_resources {
- u8 max_span_valid:1;
+ u32 max_span_valid:1,
+ max_lag_valid:1,
+ max_ports_in_lag_valid:1,
+ kvd_size_valid:1,
+ kvd_single_min_size_valid:1,
+ kvd_double_min_size_valid:1,
+ max_virtual_routers_valid:1,
+ max_system_ports_valid:1,
+ max_vlan_groups_valid:1,
+ max_regions_valid:1,
+ max_rif_valid:1;
u8 max_span;
+ u8 max_lag;
+ u8 max_ports_in_lag;
+ u32 kvd_size;
+ u32 kvd_single_min_size;
+ u32 kvd_double_min_size;
+ u16 max_virtual_routers;
+ u16 max_system_ports;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u16 max_rif;
+
+ /* Internal resources.
+ * Determined by the SW, not queried from the HW.
+ */
+ u32 kvd_single_size;
+ u32 kvd_double_size;
+ u32 kvd_linear_size;
};
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 1d1360c178bb..e742bd4e8894 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1156,6 +1156,16 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
#define MLXSW_MAX_SPAN_ID 0x2420
+#define MLXSW_MAX_LAG_ID 0x2520
+#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521
+#define MLXSW_KVD_SIZE_ID 0x1001
+#define MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002
+#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003
+#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01
+#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502
+#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906
+#define MLXSW_MAX_REGIONS_ID 0x2901
+#define MLXSW_MAX_RIF_ID 0x2C02
#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
#define MLXSW_RESOURCES_PER_QUERY 32
@@ -1167,6 +1177,46 @@ static void mlxsw_pci_resources_query_parse(int id, u64 val,
resources->max_span = val;
resources->max_span_valid = 1;
break;
+ case MLXSW_MAX_LAG_ID:
+ resources->max_lag = val;
+ resources->max_lag_valid = 1;
+ break;
+ case MLXSW_MAX_PORTS_IN_LAG_ID:
+ resources->max_ports_in_lag = val;
+ resources->max_ports_in_lag_valid = 1;
+ break;
+ case MLXSW_KVD_SIZE_ID:
+ resources->kvd_size = val;
+ resources->kvd_size_valid = 1;
+ break;
+ case MLXSW_KVD_SINGLE_MIN_SIZE_ID:
+ resources->kvd_single_min_size = val;
+ resources->kvd_single_min_size_valid = 1;
+ break;
+ case MLXSW_KVD_DOUBLE_MIN_SIZE_ID:
+ resources->kvd_double_min_size = val;
+ resources->kvd_double_min_size_valid = 1;
+ break;
+ case MLXSW_MAX_VIRTUAL_ROUTERS_ID:
+ resources->max_virtual_routers = val;
+ resources->max_virtual_routers_valid = 1;
+ break;
+ case MLXSW_MAX_SYSTEM_PORT_ID:
+ resources->max_system_ports = val;
+ resources->max_system_ports_valid = 1;
+ break;
+ case MLXSW_MAX_VLAN_GROUPS_ID:
+ resources->max_vlan_groups = val;
+ resources->max_vlan_groups_valid = 1;
+ break;
+ case MLXSW_MAX_REGIONS_ID:
+ resources->max_regions = val;
+ resources->max_regions_valid = 1;
+ break;
+ case MLXSW_MAX_RIF_ID:
+ resources->max_rif = val;
+ resources->max_rif_valid = 1;
+ break;
default:
break;
}
@@ -1209,10 +1259,52 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
return -EIO;
}
+static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
+{
+ u32 singles_size, doubles_size, linear_size;
+
+ if (!resources->kvd_single_min_size_valid ||
+ !resources->kvd_double_min_size_valid ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+ linear_size = profile->kvd_linear_size;
+
+ /* The hash part is what is left of the KVD after the
+ * linear part is carved out. It is split into single-size
+ * and double-size regions by the parts ratio from the
+ * profile, and both sizes must be multiples of the
+ * granularity from the profile.
+ */
+ doubles_size = (resources->kvd_size - linear_size);
+ doubles_size *= profile->kvd_hash_double_parts;
+ doubles_size /= (profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts);
+ doubles_size /= profile->kvd_hash_granularity;
+ doubles_size *= profile->kvd_hash_granularity;
+ singles_size = resources->kvd_size - doubles_size -
+ linear_size;
+
+ /* Check results are legal. */
+ if (singles_size < resources->kvd_single_min_size ||
+ doubles_size < resources->kvd_double_min_size ||
+ resources->kvd_size < linear_size)
+ return -EIO;
+
+ resources->kvd_single_size = singles_size;
+ resources->kvd_double_size = doubles_size;
+ resources->kvd_linear_size = linear_size;
+
+ return 0;
+}
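A worked example with made-up sizes (the real numbers come from the device and the driver profile, not from this patch):

	u32 kvd_size     = 245760;	/* hypothetical, from resources */
	u32 linear_size  = 98304;	/* hypothetical, from profile   */
	/* single:double parts ratio 2:1, granularity 128 (hypothetical) */
	u32 doubles_size = ((245760 - 98304) * 1 / (1 + 2)) / 128 * 128;
	u32 singles_size = 245760 - 49152 - 98304;
	/* doubles_size = 49152, singles_size = 98304: the three regions
	 * sum to kvd_size exactly, so the -EIO sanity checks pass.
	 */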
+
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
- const struct mlxsw_config_profile *profile)
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
{
int i;
+ int err;
mlxsw_cmd_mbox_zero(mbox);
@@ -1222,18 +1314,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
- if (profile->used_max_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_lag_set(
- mbox, profile->max_lag);
- }
- if (profile->used_max_port_per_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
- mbox, profile->max_port_per_lag);
- }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
@@ -1310,19 +1390,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
- if (profile->used_kvd_sizes) {
- mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
- mbox, profile->kvd_linear_size);
- mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
- mbox, profile->kvd_hash_single_size);
+ if (resources->kvd_size_valid) {
+ err = mlxsw_pci_profile_get_kvd_sizes(profile, resources);
+ if (err)
+ return err;
+
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
+ resources->kvd_linear_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
+ resources->kvd_single_size);
mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
- mbox, profile->kvd_hash_double_size);
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
+ resources->kvd_double_size);
}
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
@@ -1524,7 +1607,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_query_resources;
- err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources);
if (err)
goto err_config_profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 4e2354ca0e4a..6460c7256f2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1392,7 +1392,7 @@ static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
{
MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
- mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR);
+ mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 27bbcaf9cfcd..fd74d1064ff3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -248,7 +248,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
int i;
@@ -262,7 +263,8 @@ struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
return NULL;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -364,7 +366,8 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
/* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
if (err)
goto err_mpar_reg_write;
@@ -405,7 +408,8 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
return;
/* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
/* remove the SBIB buffer if it was egress SPAN */
@@ -819,9 +823,9 @@ err_span_port_mtu_update:
return err;
}
-static struct rtnl_link_stats64 *
-mlxsw_sp_port_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static int
+mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_pcpu_stats *p;
@@ -848,6 +852,107 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
tx_dropped += p->tx_dropped;
}
stats->tx_dropped = tx_dropped;
+ return 0;
+}
+
+static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return true;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return mlxsw_sp_port_get_sw_stats64(dev, sp);
+ }
+
+ return -EINVAL;
+}
+
+static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+ int prio, char *ppcnt_pl)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+}
+
+static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl);
+ if (err)
+ goto out;
+
+ stats->tx_packets =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ stats->rx_packets =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ stats->tx_bytes =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ stats->rx_bytes =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ stats->multicast =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+
+ stats->rx_crc_errors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ stats->rx_frame_errors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+
+ stats->rx_length_errors = (
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
+
+ stats->rx_errors = (stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_length_errors);
+
+out:
+ return err;
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ container_of(work, struct mlxsw_sp_port,
+ hw_stats.update_dw.work);
+
+ if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ goto out;
+
+ mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
+ mlxsw_sp_port->hw_stats.cache);
+
+out:
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
+ MLXSW_HW_STATS_UPDATE_TIME);
+}
+
+/* Return the stats from a cache that is updated periodically,
+ * as this function might get called in an atomic context.
+ */
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
+
return stats;
}
@@ -1209,6 +1314,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
.ndo_change_mtu = mlxsw_sp_port_change_mtu,
.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
+ .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
@@ -1547,8 +1654,6 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
enum mlxsw_reg_ppcnt_grp grp, int prio,
u64 *data, int data_index)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
int i, len;
@@ -1557,10 +1662,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
for (i = 0; i < len; i++)
- data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
+ data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
@@ -2145,6 +2249,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
+ mlxsw_sp_port->hw_stats.cache =
+ kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
+
+ if (!mlxsw_sp_port->hw_stats.cache) {
+ err = -ENOMEM;
+ goto err_alloc_hw_stats;
+ }
+ INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
+ &update_stats_cache);
+
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
@@ -2245,6 +2359,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_core_port_init;
}
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
return 0;
err_core_port_init:
@@ -2265,6 +2380,8 @@ err_port_system_port_mapping_set:
err_dev_addr_init:
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
+ kfree(mlxsw_sp_port->hw_stats.cache);
+err_alloc_hw_stats:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -2281,6 +2398,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
if (!mlxsw_sp_port)
return;
+ cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
@@ -2290,6 +2408,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
+ kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
@@ -2768,7 +2887,9 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ int err;
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
@@ -2779,7 +2900,26 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ if (err)
+ return err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid))
+ return -EIO;
+
+ mlxsw_sp->lags = kcalloc(resources->max_lag,
+ sizeof(struct mlxsw_sp_upper),
+ GFP_KERNEL);
+ if (!mlxsw_sp->lags)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->lags);
}
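This hunk is one instance of the patch's main theme: compile-time maxima (MLXSW_SP_LAG_MAX and friends) are replaced by limits queried from the device, with the backing arrays allocated at init time. The shape of the pattern, sketched with illustrative names rather than the exact mlxsw resource API:

static int foo_lag_init(struct foo *foo)
{
	const struct foo_resources *res = foo_resources_get(foo->core);

	/* Refuse to guess: if firmware did not report the limit,
	 * fail loudly instead of sizing the array arbitrarily.
	 */
	if (!res->max_lag_valid)
		return -EIO;

	foo->lags = kcalloc(res->max_lag, sizeof(*foo->lags), GFP_KERNEL);
	if (!foo->lags)
		return -ENOMEM;
	return 0;
}

static void foo_lag_fini(struct foo *foo)
{
	kfree(foo->lags);
}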
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
@@ -2863,6 +3003,7 @@ err_span_init:
err_router_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
+ mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
@@ -2876,38 +3017,26 @@ err_rx_listener_register:
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- int i;
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
WARN_ON(!list_empty(&mlxsw_sp->fids));
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = MLXSW_SP_LAG_MAX,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_max_pgt = 1,
.max_pgt = 0,
- .used_max_system_port = 1,
- .max_system_port = 64,
- .used_max_vlan_groups = 1,
- .max_vlan_groups = 127,
- .used_max_regions = 1,
- .max_regions = 400,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
@@ -2919,10 +3048,11 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvd_sizes = 1,
+ .used_kvd_split_data = 1,
+ .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
+ .kvd_hash_single_parts = 2,
+ .kvd_hash_double_parts = 1,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
- .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
- .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
@@ -3039,13 +3169,15 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
if (!mlxsw_sp->rifs[i])
return i;
- return MLXSW_SP_RIF_MAX;
+ return MLXSW_SP_INVALID_RIF;
}
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
@@ -3125,7 +3257,7 @@ mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return ERR_PTR(-ERANGE);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
@@ -3357,7 +3489,7 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return -ERANGE;
err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
@@ -3564,12 +3696,14 @@ static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
+ struct mlxsw_resources *resources;
int i, count = 0;
if (!mlxsw_sp_port->lagged)
return true;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
struct mlxsw_sp_port *lag_port;
lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
@@ -3775,11 +3909,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
u16 *p_lag_id)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
int i;
- for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
if (lag->dev == lag_dev) {
@@ -3813,9 +3949,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
u16 lag_id, u8 *p_port_index)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
*p_port_index = i;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 969c250b3048..9b22863a924b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -45,7 +45,7 @@
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
-#include <net/switchdev.h>
+#include <linux/notifier.h>
#include "port.h"
#include "core.h"
@@ -54,10 +54,7 @@
#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_RIF_MAX 800
-
-#define MLXSW_SP_LAG_MAX 64
-#define MLXSW_SP_PORT_PER_LAG_MAX 16
+#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
@@ -67,8 +64,6 @@
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
-#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
-
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -77,8 +72,7 @@
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
-#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
-#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
+#define MLXSW_SP_KVD_GRANULARITY 128
/* Maximum delay buffer needed in case of PAUSE frames, in cells.
* Assumes 100m cable and maximum MTU.
@@ -253,7 +247,7 @@ struct mlxsw_sp_port_mall_tc_entry {
struct mlxsw_sp_router {
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
- struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
+ struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct {
struct delayed_work dw;
@@ -263,6 +257,7 @@ struct mlxsw_sp_router {
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
struct list_head nexthop_group_list;
struct list_head nexthop_neighs_list;
+ bool aborted;
};
struct mlxsw_sp {
@@ -275,7 +270,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
struct list_head fids; /* VLAN-aware bridge FIDs */
- struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
+ struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -290,7 +285,7 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
- struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
+ struct mlxsw_sp_upper *lags;
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
@@ -302,6 +297,7 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
+ struct notifier_block fib_nb;
};
static inline struct mlxsw_sp_upper *
@@ -361,6 +357,11 @@ struct mlxsw_sp_port {
struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
+ struct {
+ #define MLXSW_HW_STATS_UPDATE_TIME HZ
+ struct rtnl_link_stats64 *cache;
+ struct delayed_work update_dw;
+ } hw_stats;
};
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
@@ -478,9 +479,12 @@ static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+
+ for (i = 0; i < resources->max_rif; i++)
if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
return mlxsw_sp->rifs[i];
@@ -582,11 +586,6 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans);
-int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4);
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 953b214f38d0..bcaed8a38037 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -595,9 +595,9 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
- pool_info->pool_type = dir;
+ pool_info->pool_type = (enum devlink_sb_pool_type) dir;
pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
- pool_info->threshold_type = pr->mode;
+ pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
return 0;
}
@@ -608,9 +608,10 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
- enum mlxsw_reg_sbpr_mode mode = threshold_type;
u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+ enum mlxsw_reg_sbpr_mode mode;
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
@@ -696,13 +697,13 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
cm->max_buff);
- *p_pool_index = pool_index_get(cm->pool, pool_type);
+ *p_pool_index = pool_index_get(cm->pool, dir);
return 0;
}
@@ -716,7 +717,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
@@ -943,7 +944,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
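The casts added throughout spectrum_buffers.c rely on the devlink enums being value-compatible with the corresponding mlxsw register enums. A hedged sketch of how that contract could be made checkable at compile time (BUILD_BUG_ON is the kernel's static assert; the helper itself is illustrative, not part of the driver):

static enum mlxsw_reg_sbxx_dir
dir_from_pool_type(enum devlink_sb_pool_type t)
{
	/* Document and enforce the value pairing the casts assume. */
	BUILD_BUG_ON((int)DEVLINK_SB_POOL_TYPE_INGRESS !=
		     (int)MLXSW_REG_SBXX_DIR_INGRESS);
	BUILD_BUG_ON((int)DEVLINK_SB_POOL_TYPE_EGRESS !=
		     (int)MLXSW_REG_SBXX_DIR_EGRESS);
	return (enum mlxsw_reg_sbxx_dir)t;
}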
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3f5c51da6d3e..78fc557d6dd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -43,6 +43,7 @@
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
+#include <net/ip_fib.h>
#include "spectrum.h"
#include "core.h"
@@ -122,17 +123,20 @@ struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry {
struct rhash_head ht_node;
+ struct list_head list;
struct mlxsw_sp_fib_key key;
enum mlxsw_sp_fib_entry_type type;
unsigned int ref_count;
u16 rif; /* used for action local */
struct mlxsw_sp_vr *vr;
+ struct fib_info *fi;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
};
struct mlxsw_sp_fib {
struct rhashtable ht;
+ struct list_head entry_list;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
};
@@ -154,6 +158,7 @@ static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
mlxsw_sp_fib_ht_params);
if (err)
return err;
+ list_add_tail(&fib_entry->list, &fib->entry_list);
if (fib->prefix_ref_count[prefix_len]++ == 0)
mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
return 0;
@@ -166,6 +171,7 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
if (--fib->prefix_ref_count[prefix_len] == 0)
mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ list_del(&fib_entry->list);
rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
mlxsw_sp_fib_ht_params);
}
@@ -216,6 +222,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
if (err)
goto err_rhashtable_init;
+ INIT_LIST_HEAD(&fib->entry_list);
return fib;
err_rhashtable_init:
@@ -252,7 +259,9 @@ static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -261,7 +270,9 @@ static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -368,10 +379,12 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
return vr;
@@ -384,7 +397,9 @@ static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto,
+ vr->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -394,7 +409,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -410,11 +426,14 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
return vr;
@@ -548,15 +567,33 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
&vr->fib->prefix_usage);
}
-static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_virtual_routers_valid)
+ return -EIO;
+
+ mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
+ sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router.vrs)
+ return -ENOMEM;
+
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
vr->id = i;
}
+
+ return 0;
+}
+
+static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->router.vrs);
}
struct mlxsw_sp_neigh_key {
@@ -1081,9 +1118,10 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
{
char raleu_pl[MLXSW_REG_RALEU_LEN];
- mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
- adj_index, ecmp_size,
- new_adj_index, new_ecmp_size);
+ mlxsw_reg_raleu_pack(raleu_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
+ adj_index, ecmp_size, new_adj_index,
+ new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1489,50 +1527,6 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}
-static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
-
- mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-}
-
-static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- char rgcr_pl[MLXSW_REG_RGCR_LEN];
-
- mlxsw_reg_rgcr_pack(rgcr_pl, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
-}
-
-int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
-{
- int err;
-
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
- INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
- err = __mlxsw_sp_router_init(mlxsw_sp);
- if (err)
- return err;
- mlxsw_sp_lpm_init(mlxsw_sp);
- mlxsw_sp_vrs_init(mlxsw_sp);
- err = mlxsw_sp_neigh_init(mlxsw_sp);
- if (err)
- goto err_neigh_init;
- return 0;
-
-err_neigh_init:
- __mlxsw_sp_router_fini(mlxsw_sp);
- return err;
-}
-
-void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
-{
- mlxsw_sp_neigh_fini(mlxsw_sp);
- __mlxsw_sp_router_fini(mlxsw_sp);
-}
-
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -1558,8 +1552,9 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1573,8 +1568,9 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_local_pack(ralue_pl,
MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
fib_entry->rif);
@@ -1589,8 +1585,9 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1637,94 +1634,102 @@ static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
-struct mlxsw_sp_router_fib4_add_info {
- struct switchdev_trans_item tritem;
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_fib_entry *fib_entry;
-};
-
-static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
-{
- const struct mlxsw_sp_router_fib4_add_info *info = data;
- struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
- struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
- struct mlxsw_sp_vr *vr = fib_entry->vr;
-
- mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
- kfree(info);
-}
-
static int
mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4,
+ const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = fib4->fi;
+ struct fib_info *fi = fen_info->fi;
+ struct mlxsw_sp_rif *r = NULL;
+ int nhsel;
+ int err;
- if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
+ if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
}
- if (fib4->type != RTN_UNICAST)
+ if (fen_info->type != RTN_UNICAST)
return -EINVAL;
- if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
- struct mlxsw_sp_rif *r;
+ for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
+ const struct fib_nh *nh = &fi->fib_nh[nhsel];
+
+ if (!nh->nh_dev)
+ continue;
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
+ if (!r) {
+ /* If a router interface is not found for at
+ * least one of the nexthops, that nexthop
+ * points to some device unrelated to us. Set
+ * a trap and pass the packets for this prefix
+ * to the kernel.
+ */
+ break;
+ }
+ }
+ if (!r) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ return 0;
+ }
+
+ if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
- if (!r)
- return -EINVAL;
fib_entry->rif = r->rif;
- return 0;
+ } else {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+ if (err)
+ return err;
}
- fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
- return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+ fib_info_offload_inc(fen_info->fi);
+ return 0;
}
static void
mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
- return;
- mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+ if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
+ fib_info_offload_dec(fib_entry->fi);
+ if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
+ mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4)
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
- struct fib_info *fi = fib4->fi;
+ struct fib_info *fi = fen_info->fi;
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
return ERR_CAST(vr);
- fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst),
- fib4->dst_len, fi->fib_dev);
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len, fi->fib_dev);
if (fib_entry) {
/* Already exists, just take a reference */
fib_entry->ref_count++;
return fib_entry;
}
- fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
- sizeof(fib4->dst),
- fib4->dst_len, fi->fib_dev);
+ fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len, fi->fib_dev);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
+ fib_entry->fi = fi;
fib_entry->ref_count = 1;
- err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
+ err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
if (err)
goto err_fib4_entry_init;
@@ -1740,21 +1745,23 @@ err_fib_entry_create:
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
- const struct switchdev_obj_ipv4_fib *fib4)
+ const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_vr *vr;
- vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
+ MLXSW_SP_L3_PROTO_IPV4);
if (!vr)
return NULL;
- return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst), fib4->dst_len,
- fib4->fi->fib_dev);
+ return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
+ sizeof(fen_info->dst),
+ fen_info->dst_len,
+ fen_info->fi->fib_dev);
}
-void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_vr *vr = fib_entry->vr;
@@ -1765,60 +1772,43 @@ void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
- struct mlxsw_sp_fib_entry *fib_entry;
- int err;
+ unsigned int last_ref_count;
- fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
- if (IS_ERR(fib_entry))
- return PTR_ERR(fib_entry);
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto err_alloc_info;
- }
- info->mlxsw_sp = mlxsw_sp;
- info->fib_entry = fib_entry;
- switchdev_trans_item_enqueue(trans, info,
- mlxsw_sp_router_fib4_add_info_destroy,
- &info->tritem);
- return 0;
-
-err_alloc_info:
- mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return err;
+ do {
+ last_ref_count = fib_entry->ref_count;
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ } while (last_ref_count != 1);
}
-static int
-mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_vr *vr;
int err;
- info = switchdev_trans_item_dequeue(trans);
- fib_entry = info->fib_entry;
- kfree(info);
+ if (mlxsw_sp->router.aborted)
+ return 0;
+
+ fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
+ if (IS_ERR(fib_entry)) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
+ return PTR_ERR(fib_entry);
+ }
if (fib_entry->ref_count != 1)
return 0;
vr = fib_entry->vr;
err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
- if (err)
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
goto err_fib_entry_insert;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
+ }
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_add;
return 0;
@@ -1830,24 +1820,15 @@ err_fib_entry_insert:
return err;
}
-int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- if (switchdev_trans_ph_prepare(trans))
- return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
- fib4, trans);
- return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
- fib4, trans);
-}
-
-int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4)
+static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
- fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
+ if (mlxsw_sp->router.aborted)
+ return 0;
+
+ fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
if (!fib_entry) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
return -ENOENT;
@@ -1861,3 +1842,172 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return 0;
}
+
+static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ int err;
+
+ mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_fib_entry *tmp;
+ struct mlxsw_sp_vr *vr;
+ int i;
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (!vr->used)
+ continue;
+
+ list_for_each_entry_safe(fib_entry, tmp,
+ &vr->fib->entry_list, list) {
+ bool do_break = &tmp->list == &vr->fib->entry_list;
+
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
+ fib_entry);
+ mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
+ if (do_break)
+ break;
+ }
+ }
+ mlxsw_sp->router.aborted = true;
+ err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
+}
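The do_break test in the abort loop above deserves a note: mlxsw_sp_fib_entry_put_all() can release the last reference to the entry and, through mlxsw_sp_vr_put(), free the FIB (and with it the list head) itself, so list_for_each_entry_safe() alone is not enough. The guard is computed before the release, and the loop breaks instead of re-reading a possibly freed head. The idiom in isolation, with placeholder names (head, entry, tmp, release):

list_for_each_entry_safe(entry, tmp, &head->list, node) {
	/* true when 'entry' is the last element */
	bool do_break = (&tmp->node == &head->list);

	release(entry);		/* may free 'head' with the last entry */
	if (do_break)
		break;		/* don't touch the (freed) head again */
}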
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_rif_valid)
+ return -EIO;
+
+ mlxsw_sp->rifs = kcalloc(resources->max_rif,
+ sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
+ if (!mlxsw_sp->rifs)
+ return -ENOMEM;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ if (err)
+ goto err_rgcr_fail;
+
+ return 0;
+
+err_rgcr_fail:
+ kfree(mlxsw_sp->rifs);
+ return err;
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int i;
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+ kfree(mlxsw_sp->rifs);
+}
+
+static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
+ struct fib_entry_notifier_info *fen_info = ptr;
+ int err;
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
+ if (err)
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fib4_del(mlxsw_sp, fen_info);
+ break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ mlxsw_sp_router_fib4_abort(mlxsw_sp);
+ break;
+ }
+ return NOTIFY_DONE;
+}
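With this handler the driver consumes the new FIB notification chain instead of the SWITCHDEV_OBJ_ID_IPV4_FIB objects removed further down. A minimal consumer of the same chain, matching the register_fib_notifier() signature this patch uses (names are illustrative):

#include <linux/notifier.h>
#include <net/ip_fib.h>

static int foo_fib_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	struct fib_entry_notifier_info *fen_info = ptr;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
		/* try to offload fen_info; on failure, fall back */
		break;
	case FIB_EVENT_ENTRY_DEL:
		/* remove the offloaded route */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_fib_nb = {
	.notifier_call = foo_fib_event,
};

/* register_fib_notifier(&foo_fib_nb) at init,
 * unregister_fib_notifier(&foo_fib_nb) at teardown.
 */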
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
+ err = __mlxsw_sp_router_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ mlxsw_sp_lpm_init(mlxsw_sp);
+ err = mlxsw_sp_vrs_init(mlxsw_sp);
+ if (err)
+ goto err_vrs_init;
+
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+
+ mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
+ register_fib_notifier(&mlxsw_sp->fib_nb);
+ return 0;
+
+err_neigh_init:
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+err_vrs_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ unregister_fib_notifier(&mlxsw_sp->fib_nb);
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+ __mlxsw_sp_router_fini(mlxsw_sp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7186c4810785..5e00c79e8133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1044,11 +1044,6 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -1181,10 +1176,6 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -1205,9 +1196,11 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (mlxsw_sp_port)
return mlxsw_sp_port;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 377daa4d509c..8b15bf0c744d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1512,10 +1512,6 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = 64,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = 16,
.used_max_mid = 1,
.max_mid = 7000,
.used_max_pgt = 1,
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 68178819ff12..0efb2ba9a558 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -3,6 +3,13 @@ obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o
nfp_netvf-objs := \
nfp_net_common.o \
nfp_net_ethtool.o \
+ nfp_net_offload.o \
nfp_netvf_main.o
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+nfp_netvf-objs += \
+ nfp_bpf_verifier.o \
+ nfp_bpf_jit.o
+endif
+
nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
new file mode 100644
index 000000000000..22484b6fd3e8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_ASM_H__
+#define __NFP_ASM_H__ 1
+
+#include "nfp_bpf.h"
+
+#define REG_NONE 0
+
+#define RE_REG_NO_DST 0x020
+#define RE_REG_IMM 0x020
+#define RE_REG_IMM_encode(x) \
+ (RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1))
+#define RE_REG_IMM_MAX 0x07fULL
+#define RE_REG_XFR 0x080
+
+#define UR_REG_XFR 0x180
+#define UR_REG_NN 0x280
+#define UR_REG_NO_DST 0x300
+#define UR_REG_IMM UR_REG_NO_DST
+#define UR_REG_IMM_encode(x) (UR_REG_IMM | (x))
+#define UR_REG_IMM_MAX 0x0ffULL
+
+#define OP_BR_BASE 0x0d800000020ULL
+#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
+#define OP_BR_MASK 0x0000000001fULL
+#define OP_BR_EV_PIP 0x00000000300ULL
+#define OP_BR_CSS 0x0000003c000ULL
+#define OP_BR_DEFBR 0x00000300000ULL
+#define OP_BR_ADDR_LO 0x007ffc00000ULL
+#define OP_BR_ADDR_HI 0x10000000000ULL
+
+#define nfp_is_br(_insn) \
+ (((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
+
+enum br_mask {
+ BR_BEQ = 0x00,
+ BR_BNE = 0x01,
+ BR_BHS = 0x04,
+ BR_BLO = 0x05,
+ BR_BGE = 0x08,
+ BR_UNC = 0x18,
+};
+
+enum br_ev_pip {
+ BR_EV_PIP_UNCOND = 0,
+ BR_EV_PIP_COND = 1,
+};
+
+enum br_ctx_signal_state {
+ BR_CSS_NONE = 2,
+};
+
+#define OP_BBYTE_BASE 0x0c800000000ULL
+#define OP_BB_A_SRC 0x000000000ffULL
+#define OP_BB_BYTE 0x00000000300ULL
+#define OP_BB_B_SRC 0x0000003fc00ULL
+#define OP_BB_I8 0x00000040000ULL
+#define OP_BB_EQ 0x00000080000ULL
+#define OP_BB_DEFBR 0x00000300000ULL
+#define OP_BB_ADDR_LO 0x007ffc00000ULL
+#define OP_BB_ADDR_HI 0x10000000000ULL
+
+#define OP_BALU_BASE 0x0e800000000ULL
+#define OP_BA_A_SRC 0x000000003ffULL
+#define OP_BA_B_SRC 0x000000ffc00ULL
+#define OP_BA_DEFBR 0x00000300000ULL
+#define OP_BA_ADDR_HI 0x0007fc00000ULL
+
+#define OP_IMMED_A_SRC 0x000000003ffULL
+#define OP_IMMED_B_SRC 0x000000ffc00ULL
+#define OP_IMMED_IMM 0x0000ff00000ULL
+#define OP_IMMED_WIDTH 0x00060000000ULL
+#define OP_IMMED_INV 0x00080000000ULL
+#define OP_IMMED_SHIFT 0x00600000000ULL
+#define OP_IMMED_BASE 0x0f000000000ULL
+#define OP_IMMED_WR_AB 0x20000000000ULL
+
+enum immed_width {
+ IMMED_WIDTH_ALL = 0,
+ IMMED_WIDTH_BYTE = 1,
+ IMMED_WIDTH_WORD = 2,
+};
+
+enum immed_shift {
+ IMMED_SHIFT_0B = 0,
+ IMMED_SHIFT_1B = 1,
+ IMMED_SHIFT_2B = 2,
+};
+
+#define OP_SHF_BASE 0x08000000000ULL
+#define OP_SHF_A_SRC 0x000000000ffULL
+#define OP_SHF_SC 0x00000000300ULL
+#define OP_SHF_B_SRC 0x0000003fc00ULL
+#define OP_SHF_I8 0x00000040000ULL
+#define OP_SHF_SW 0x00000080000ULL
+#define OP_SHF_DST 0x0000ff00000ULL
+#define OP_SHF_SHIFT 0x001f0000000ULL
+#define OP_SHF_OP 0x00e00000000ULL
+#define OP_SHF_DST_AB 0x01000000000ULL
+#define OP_SHF_WR_AB 0x20000000000ULL
+
+enum shf_op {
+ SHF_OP_NONE = 0,
+ SHF_OP_AND = 2,
+ SHF_OP_OR = 5,
+};
+
+enum shf_sc {
+ SHF_SC_R_ROT = 0,
+ SHF_SC_R_SHF = 1,
+ SHF_SC_L_SHF = 2,
+ SHF_SC_R_DSHF = 3,
+};
+
+#define OP_ALU_A_SRC 0x000000003ffULL
+#define OP_ALU_B_SRC 0x000000ffc00ULL
+#define OP_ALU_DST 0x0003ff00000ULL
+#define OP_ALU_SW 0x00040000000ULL
+#define OP_ALU_OP 0x00f80000000ULL
+#define OP_ALU_DST_AB 0x01000000000ULL
+#define OP_ALU_BASE 0x0a000000000ULL
+#define OP_ALU_WR_AB 0x20000000000ULL
+
+enum alu_op {
+ ALU_OP_NONE = 0x00,
+ ALU_OP_ADD = 0x01,
+ ALU_OP_NEG = 0x04,
+ ALU_OP_AND = 0x08,
+ ALU_OP_SUB_C = 0x0d,
+ ALU_OP_ADD_C = 0x11,
+ ALU_OP_OR = 0x14,
+ ALU_OP_SUB = 0x15,
+ ALU_OP_XOR = 0x18,
+};
+
+enum alu_dst_ab {
+ ALU_DST_A = 0,
+ ALU_DST_B = 1,
+};
+
+#define OP_LDF_BASE 0x0c000000000ULL
+#define OP_LDF_A_SRC 0x000000000ffULL
+#define OP_LDF_SC 0x00000000300ULL
+#define OP_LDF_B_SRC 0x0000003fc00ULL
+#define OP_LDF_I8 0x00000040000ULL
+#define OP_LDF_SW 0x00000080000ULL
+#define OP_LDF_ZF 0x00000100000ULL
+#define OP_LDF_BMASK 0x0000f000000ULL
+#define OP_LDF_SHF 0x001f0000000ULL
+#define OP_LDF_WR_AB 0x20000000000ULL
+
+#define OP_CMD_A_SRC 0x000000000ffULL
+#define OP_CMD_CTX 0x00000000300ULL
+#define OP_CMD_B_SRC 0x0000003fc00ULL
+#define OP_CMD_TOKEN 0x000000c0000ULL
+#define OP_CMD_XFER 0x00001f00000ULL
+#define OP_CMD_CNT 0x0000e000000ULL
+#define OP_CMD_SIG 0x000f0000000ULL
+#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_MODE 0x1c0000000000ULL
+
+struct cmd_tgt_act {
+ u8 token;
+ u8 tgt_cmd;
+};
+
+enum cmd_tgt_map {
+ CMD_TGT_READ8,
+ CMD_TGT_WRITE8,
+ CMD_TGT_READ_LE,
+ CMD_TGT_READ_SWAP_LE,
+ __CMD_TGT_MAP_SIZE,
+};
+
+enum cmd_mode {
+ CMD_MODE_40b_AB = 0,
+ CMD_MODE_40b_BA = 1,
+ CMD_MODE_32b = 4,
+};
+
+enum cmd_ctx_swap {
+ CMD_CTX_SWAP = 0,
+ CMD_CTX_NO_SWAP = 3,
+};
+
+#define OP_LCSR_BASE 0x0fc00000000ULL
+#define OP_LCSR_A_SRC 0x000000003ffULL
+#define OP_LCSR_B_SRC 0x000000ffc00ULL
+#define OP_LCSR_WRITE 0x00000200000ULL
+#define OP_LCSR_ADDR 0x001ffc00000ULL
+
+enum lcsr_wr_src {
+ LCSR_WR_AREG,
+ LCSR_WR_BREG,
+ LCSR_WR_IMM,
+};
+
+#define OP_CARB_BASE 0x0e000000000ULL
+#define OP_CARB_OR 0x00000010000ULL
+
+#endif
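All the OP_* constants above are masks into a 64-bit instruction word, manipulated with the FIELD_PREP()/FIELD_GET() helpers from the then-new linux/bitfield.h (__bf_shf() yields a mask's shift). A small illustrative helper, not part of the driver:

#include <linux/bitfield.h>

/* If 'insn' is a branch, pull out its low target-address field. */
static u16 br_addr_lo(u64 insn)
{
	if (!nfp_is_br(insn))
		return 0;
	return FIELD_GET(OP_BR_ADDR_LO, insn);
}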
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
new file mode 100644
index 000000000000..87aa8a3e9112
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_BPF_H__
+#define __NFP_BPF_H__ 1
+
+#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#define FIELD_FIT(mask, val) (!((((u64)val) << __bf_shf(mask)) & ~(mask)))
+
+/* The branch fixup logic uses the top-most byte of a branch instruction as a
+ * scratch area. Remember to clear it before sending instructions to HW!
+ */
+#define OP_BR_SPECIAL 0xff00000000000000ULL
+
+enum br_special {
+ OP_BR_NORMAL = 0,
+ OP_BR_GO_OUT,
+ OP_BR_GO_ABORT,
+};
+
+enum static_regs {
+ STATIC_REG_PKT = 1,
+#define REG_PKT_BANK ALU_DST_A
+ STATIC_REG_IMM = 2, /* Bank AB */
+};
+
+enum nfp_bpf_action_type {
+ NN_ACT_TC_DROP,
+ NN_ACT_TC_REDIR,
+ NN_ACT_DIRECT,
+};
+
+/* Software register representation, hardware encoding in asm.h */
+#define NN_REG_TYPE GENMASK(31, 24)
+#define NN_REG_VAL GENMASK(7, 0)
+
+enum nfp_bpf_reg_type {
+ NN_REG_GPR_A = BIT(0),
+ NN_REG_GPR_B = BIT(1),
+ NN_REG_NNR = BIT(2),
+ NN_REG_XFER = BIT(3),
+ NN_REG_IMM = BIT(4),
+ NN_REG_NONE = BIT(5),
+};
+
+#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B)
+
+#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH))
+#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A))
+#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B))
+#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR))
+#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER))
+#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM))
+#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE))
+
+#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT)
+#define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM)
+#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM)
+#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM)
+
+#define NFP_BPF_ABI_FLAGS reg_nnr(0)
+#define NFP_BPF_ABI_FLAG_MARK 1
+#define NFP_BPF_ABI_MARK reg_nnr(1)
+#define NFP_BPF_ABI_PKT reg_nnr(2)
+#define NFP_BPF_ABI_LEN reg_nnr(3)
+
+struct nfp_prog;
+struct nfp_insn_meta;
+typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
+
+#define nfp_prog_first_meta(nfp_prog) \
+ list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
+#define nfp_prog_last_meta(nfp_prog) \
+ list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
+#define nfp_meta_next(meta) list_next_entry(meta, l)
+#define nfp_meta_prev(meta) list_prev_entry(meta, l)
+
+/**
+ * struct nfp_insn_meta - BPF instruction wrapper
+ * @insn: BPF instruction
+ * @off: index of first generated machine instruction (in nfp_prog.prog)
+ * @n: eBPF instruction number
+ * @skip: skip this instruction (optimized out)
+ * @double_cb: callback for second part of the instruction
+ * @l: link on nfp_prog->insns list
+ */
+struct nfp_insn_meta {
+ struct bpf_insn insn;
+ unsigned int off;
+ unsigned short n;
+ bool skip;
+ instr_cb_t double_cb;
+
+ struct list_head l;
+};
+
+#define BPF_SIZE_MASK 0x18
+
+static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
+{
+ return BPF_CLASS(meta->insn.code);
+}
+
+static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
+{
+ return BPF_SRC(meta->insn.code);
+}
+
+static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
+{
+ return BPF_OP(meta->insn.code);
+}
+
+static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
+{
+ return BPF_MODE(meta->insn.code);
+}
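These accessors split the opcode byte of a struct bpf_insn using the kernel's BPF_CLASS()/BPF_SRC()/BPF_OP()/BPF_MODE() macros. For example, a 64-bit register-to-register add would be classified as below (a standalone sketch in the context of this header):

/* BPF_ALU64 | BPF_ADD | BPF_X: dst_reg += src_reg */
static bool is_reg_add64(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_ALU64 &&
	       mbpf_op(meta) == BPF_ADD &&
	       mbpf_src(meta) == BPF_X;
}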
+
+/**
+ * struct nfp_prog - nfp BPF program
+ * @prog: machine code
+ * @prog_len: number of valid instructions in @prog array
+ * @__prog_alloc_len: alloc size of @prog array
+ * @act: BPF program/action type (TC DA, TC with action, XDP etc.)
+ * @num_regs: number of registers used by this program
+ * @regs_per_thread: number of basic registers allocated per thread
+ * @start_off: address of the first instruction in the memory
+ * @tgt_out: jump target for normal exit
+ * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
+ * @tgt_done: jump target to get the next packet
+ * @n_translated: number of successfully translated instructions (for errors)
+ * @error: error code if something went wrong
+ * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
+ */
+struct nfp_prog {
+ u64 *prog;
+ unsigned int prog_len;
+ unsigned int __prog_alloc_len;
+
+ enum nfp_bpf_action_type act;
+
+ unsigned int num_regs;
+ unsigned int regs_per_thread;
+
+ unsigned int start_off;
+ unsigned int tgt_out;
+ unsigned int tgt_abort;
+ unsigned int tgt_done;
+
+ unsigned int n_translated;
+ int error;
+
+ struct list_head insns;
+};
+
+struct nfp_bpf_result {
+ unsigned int n_instr;
+ bool dense_mode;
+};
+
+int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res);
+
+int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
new file mode 100644
index 000000000000..f8df5300f49c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
@@ -0,0 +1,1813 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/kernel.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/pkt_cls.h>
+#include <linux/unistd.h>
+
+#include "nfp_asm.h"
+#include "nfp_bpf.h"
+
+/* --- NFP prog --- */
+/* Foreach "multiple" entries macros provide pos and next<n> pointers.
+ * It's safe to modify the next pointers (but not pos).
+ */
+#define nfp_for_each_insn_walk2(nfp_prog, pos, next) \
+ for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
+ next = list_next_entry(pos, l); \
+ &(nfp_prog)->insns != &pos->l && \
+ &(nfp_prog)->insns != &next->l; \
+ pos = nfp_meta_next(pos), \
+ next = nfp_meta_next(pos))
+
+#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \
+ for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
+ next = list_next_entry(pos, l), \
+ next2 = list_next_entry(next, l); \
+ &(nfp_prog)->insns != &pos->l && \
+ &(nfp_prog)->insns != &next->l && \
+ &(nfp_prog)->insns != &next2->l; \
+ pos = nfp_meta_next(pos), \
+ next = nfp_meta_next(pos), \
+ next2 = nfp_meta_next(next))
+
+static bool
+nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return meta->l.next != &nfp_prog->insns;
+}
+
+static bool
+nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return meta->l.prev != &nfp_prog->insns;
+}
+
+static void nfp_prog_free(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta, *tmp;
+
+ list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
+ list_del(&meta->l);
+ kfree(meta);
+ }
+ kfree(nfp_prog);
+}
+
+static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
+{
+ if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
+ nfp_prog->error = -ENOSPC;
+ return;
+ }
+
+ nfp_prog->prog[nfp_prog->prog_len] = insn;
+ nfp_prog->prog_len++;
+}
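nfp_prog_push() establishes the error convention the rest of the JIT relies on: emitters do not return a status, they record a failure in nfp_prog->error (here -ENOSPC when the instruction buffer is full) and refuse to write past the buffer, so long emit sequences stay linear and are checked once. A sketch of a caller under that convention (insn_a/insn_b are hypothetical instruction words):

static int build_sequence(struct nfp_prog *nfp_prog, u64 insn_a, u64 insn_b)
{
	nfp_prog_push(nfp_prog, insn_a);
	nfp_prog_push(nfp_prog, insn_b);	/* skipped if buffer full */

	return nfp_prog->error;	/* single check for the whole sequence */
}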
+
+static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
+{
+ return nfp_prog->start_off + nfp_prog->prog_len;
+}
+
+static unsigned int
+nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+{
+ return offset - nfp_prog->start_off;
+}
+
+/* --- SW reg --- */
+struct nfp_insn_ur_regs {
+ enum alu_dst_ab dst_ab;
+ u16 dst;
+ u16 areg, breg;
+ bool swap;
+ bool wr_both;
+};
+
+struct nfp_insn_re_regs {
+ enum alu_dst_ab dst_ab;
+ u8 dst;
+ u8 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool i8;
+};
+
+static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
+{
+ u16 val = FIELD_GET(NN_REG_VAL, swreg);
+
+ switch (FIELD_GET(NN_REG_TYPE, swreg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_NNR:
+ return UR_REG_NN | val;
+ case NN_REG_XFER:
+ return UR_REG_XFR | val;
+ case NN_REG_IMM:
+ if (val & ~0xff) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ return UR_REG_IMM_encode(val);
+ case NN_REG_NONE:
+ return is_dst ? UR_REG_NO_DST : REG_NONE;
+ default:
+ pr_err("unrecognized reg encoding %08x\n", swreg);
+ return 0;
+ }
+}
+
+static int
+swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_unreg(dst, true);
+
+ /* Decode source operands */
+ if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
+ FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_unreg(rreg, false);
+ reg->breg = nfp_swreg_to_unreg(lreg, false);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_unreg(lreg, false);
+ reg->breg = nfp_swreg_to_unreg(rreg, false);
+ }
+
+ return 0;
+}
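swreg_to_unrestricted() decodes the 32-bit software-register tokens built by the reg_a()/reg_b()/reg_imm()/reg_both() macros in nfp_bpf.h into hardware A/B-bank encodings. Because each ALU operand must come from a distinct bank, two same-bank sources are rejected, and a B-against-A pairing is swapped with reg->swap set so the caller can commute the operation. A hypothetical call:

static int example_operands(struct nfp_insn_ur_regs *reg)
{
	/* dst: GPR0 written to both banks; lreg: GPR3 from bank A;
	 * rreg: the immediate 0x2a (encoded, not materialized).
	 */
	return swreg_to_unrestricted(reg_both(0), reg_a(3), reg_imm(0x2a),
				     reg);
}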
+
+static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
+{
+ u16 val = FIELD_GET(NN_REG_VAL, swreg);
+
+ switch (FIELD_GET(NN_REG_TYPE, swreg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_XFER:
+ return RE_REG_XFR | val;
+ case NN_REG_IMM:
+ if (val & ~(0x7f | has_imm8 << 7)) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ *i8 = val & 0x80;
+ return RE_REG_IMM_encode(val & 0x7f);
+ case NN_REG_NONE:
+ return is_dst ? RE_REG_NO_DST : REG_NONE;
+ default:
+ pr_err("unrecognized reg encoding\n");
+ return 0;
+ }
+}
+
+static int
+swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
+ bool has_imm8)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
+
+ /* Decode source operands */
+ if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
+ FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ }
+
+ return 0;
+}
+
+/* --- Emitters --- */
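+/* Per-command token and target/action encoding for the memory commands
+ * the translator emits.
+ */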
+static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
+ [CMD_TGT_WRITE8] = { 0x00, 0x42 },
+ [CMD_TGT_READ8] = { 0x01, 0x43 },
+ [CMD_TGT_READ_LE] = { 0x01, 0x40 },
+ [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
+};
+
+static void
+__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
+ u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+{
+ enum cmd_ctx_swap ctx;
+ u64 insn;
+
+ if (sync)
+ ctx = CMD_CTX_SWAP;
+ else
+ ctx = CMD_CTX_NO_SWAP;
+
+ insn = FIELD_PREP(OP_CMD_A_SRC, areg) |
+ FIELD_PREP(OP_CMD_CTX, ctx) |
+ FIELD_PREP(OP_CMD_B_SRC, breg) |
+ FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
+ FIELD_PREP(OP_CMD_XFER, xfer) |
+ FIELD_PREP(OP_CMD_CNT, size) |
+ FIELD_PREP(OP_CMD_SIG, sync) |
+ FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+ FIELD_PREP(OP_CMD_MODE, mode);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
+ u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+ if (reg.swap) {
+ pr_err("cmd can't swap arguments\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+}
+
+static void
+__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
+ enum br_ctx_signal_state css, u16 addr, u8 defer)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = addr != addr_lo;
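+ /* The target address is split into LO bits and a HI part; programs
+ * are short enough that HI reduces to "any bits above LO set".
+ */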
+
+ insn = OP_BR_BASE |
+ FIELD_PREP(OP_BR_MASK, mask) |
+ FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
+ FIELD_PREP(OP_BR_CSS, css) |
+ FIELD_PREP(OP_BR_DEFBR, defer) |
+ FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+{
+ if (defer > 2) {
+ pr_err("BUG: branch defer out of bounds %d\n", defer);
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+ __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+}
+
+static void
+emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
+{
+ __emit_br(nfp_prog, mask,
+ mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+ BR_CSS_NONE, addr, defer);
+}
+
+static void
+__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
+ u8 byte, bool equal, u16 addr, u8 defer)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
+ addr_hi = addr != addr_lo;
+
+ insn = OP_BBYTE_BASE |
+ FIELD_PREP(OP_BB_A_SRC, areg) |
+ FIELD_PREP(OP_BB_BYTE, byte) |
+ FIELD_PREP(OP_BB_B_SRC, breg) |
+ FIELD_PREP(OP_BB_I8, imm8) |
+ FIELD_PREP(OP_BB_EQ, equal) |
+ FIELD_PREP(OP_BB_DEFBR, defer) |
+ FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BB_ADDR_HI, addr_hi);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_br_byte_neq(struct nfp_prog *nfp_prog,
+ u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
+ defer);
+}
+
+static void
+__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
+ enum immed_width width, bool invert,
+ enum immed_shift shift, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_IMMED_BASE |
+ FIELD_PREP(OP_IMMED_A_SRC, areg) |
+ FIELD_PREP(OP_IMMED_B_SRC, breg) |
+ FIELD_PREP(OP_IMMED_IMM, imm_hi) |
+ FIELD_PREP(OP_IMMED_WIDTH, width) |
+ FIELD_PREP(OP_IMMED_INV, invert) |
+ FIELD_PREP(OP_IMMED_SHIFT, shift) |
+ FIELD_PREP(OP_IMMED_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
+ enum immed_width width, bool invert, enum immed_shift shift)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
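+ /* Split the 16-bit immediate: the low byte travels as the B operand,
+ * the high byte goes into the instruction's IMM field.
+ */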
+ __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
+ invert, shift, reg.wr_both);
+}
+
+static void
+__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ enum shf_sc sc, u8 shift,
+ u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
+{
+ u64 insn;
+
+ if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ if (sc == SHF_SC_L_SHF)
+ shift = 32 - shift;
+
+ insn = OP_SHF_BASE |
+ FIELD_PREP(OP_SHF_A_SRC, areg) |
+ FIELD_PREP(OP_SHF_SC, sc) |
+ FIELD_PREP(OP_SHF_B_SRC, breg) |
+ FIELD_PREP(OP_SHF_I8, i8) |
+ FIELD_PREP(OP_SHF_SW, sw) |
+ FIELD_PREP(OP_SHF_DST, dst) |
+ FIELD_PREP(OP_SHF_SHIFT, shift) |
+ FIELD_PREP(OP_SHF_OP, op) |
+ FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
+ FIELD_PREP(OP_SHF_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
+ enum shf_sc sc, u8 shift)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
+ reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
+}
+
+static void
+__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_ALU_BASE |
+ FIELD_PREP(OP_ALU_A_SRC, areg) |
+ FIELD_PREP(OP_ALU_B_SRC, breg) |
+ FIELD_PREP(OP_ALU_DST, dst) |
+ FIELD_PREP(OP_ALU_SW, swap) |
+ FIELD_PREP(OP_ALU_OP, op) |
+ FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
+ FIELD_PREP(OP_ALU_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_alu(nfp_prog, reg.dst, reg.dst_ab,
+ reg.areg, op, reg.breg, reg.swap, reg.wr_both);
+}
+
+static void
+__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
+ u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
+ bool zero, bool swap, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_LDF_BASE |
+ FIELD_PREP(OP_LDF_A_SRC, areg) |
+ FIELD_PREP(OP_LDF_SC, sc) |
+ FIELD_PREP(OP_LDF_B_SRC, breg) |
+ FIELD_PREP(OP_LDF_I8, imm8) |
+ FIELD_PREP(OP_LDF_SW, swap) |
+ FIELD_PREP(OP_LDF_ZF, zero) |
+ FIELD_PREP(OP_LDF_BMASK, bmask) |
+ FIELD_PREP(OP_LDF_SHF, shift) |
+ FIELD_PREP(OP_LDF_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
+ u32 dst, u8 bmask, u32 src, bool zero)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
+ reg.i8, zero, reg.swap, reg.wr_both);
+}
+
+static void
+emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
+ enum shf_sc sc, u8 shift)
+{
+ emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
+}
+
+/* --- Wrappers --- */
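+/* A 32-bit immediate fits a single 'immed' instruction only if its
+ * significant bits occupy 16 contiguous, byte-aligned bits;
+ * pack_immed() finds the value/shift pair, and wrp_immed() falls back
+ * to the inverted form or to a two-instruction load.
+ */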
+static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
+{
+ if (!(imm & 0xffff0000)) {
+ *val = imm;
+ *shift = IMMED_SHIFT_0B;
+ } else if (!(imm & 0xff0000ff)) {
+ *val = imm >> 8;
+ *shift = IMMED_SHIFT_1B;
+ } else if (!(imm & 0x0000ffff)) {
+ *val = imm >> 16;
+ *shift = IMMED_SHIFT_2B;
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
+{
+ enum immed_shift shift;
+ u16 val;
+
+ if (pack_immed(imm, &val, &shift)) {
+ emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
+ } else if (pack_immed(~imm, &val, &shift)) {
+ emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
+ } else {
+ emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
+ false, IMMED_SHIFT_0B);
+ emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
+ false, IMMED_SHIFT_2B);
+ }
+}
+
+/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
+ * If @imm is small enough, encode it directly in the operand and return;
+ * otherwise load @imm into a spare register and return its encoding.
+ */
+static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+{
+ if (FIELD_FIT(UR_REG_IMM_MAX, imm))
+ return reg_imm(imm);
+
+ wrp_immed(nfp_prog, tmp_reg, imm);
+ return tmp_reg;
+}
+
+/* re_load_imm_any() - encode immediate or use tmp register (restricted)
+ * If the @imm is small enough encode it directly in operand and return
+ * otherwise load @imm to a spare register and return its encoding.
+ */
+static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+{
+ if (FIELD_FIT(RE_REG_IMM_MAX, imm))
+ return reg_imm(imm);
+
+ wrp_immed(nfp_prog, tmp_reg, imm);
+ return tmp_reg;
+}
+
+static void
+wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
+ enum br_special special)
+{
+ emit_br(nfp_prog, mask, 0, 0);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_BR_SPECIAL, special);
+}
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+ emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
+}
+
+static int
+construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
+ u16 src, bool src_valid, u8 size)
+{
+ unsigned int i;
+ u16 shift, sz;
+ u32 tmp_reg;
+
+ /* We load the value from the address indicated in @offset and then
+ * shift out the data we don't need. Note: this is big endian!
+ */
+ sz = size < 4 ? 4 : size;
+ shift = size < 4 ? 4 - size : 0;
+
+ if (src_valid) {
+ /* Calculate the true offset (src_reg + imm) */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_both(nfp_prog),
+ reg_a(src), ALU_OP_ADD, tmp_reg);
+ /* Check packet length (size guaranteed to fit b/c it's u8) */
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
+ emit_alu(nfp_prog, reg_none(),
+ NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ /* Load data */
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
+ } else {
+ /* Check packet length */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
+ imm_a(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ /* Load data */
+ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
+ }
+
+ i = 0;
+ if (shift)
+ emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
+ reg_xfer(0), SHF_SC_R_SHF, shift * 8);
+ else
+ for (; i * 4 < size; i++)
+ emit_alu(nfp_prog, reg_both(i),
+ reg_none(), ALU_OP_NONE, reg_xfer(i));
+
+ if (i < 2)
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ return 0;
+}
+
+static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
+{
+ return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
+}
+
+static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
+{
+ emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
+ reg_none(), ALU_OP_NONE, reg_b(src));
+ emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
+ NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));
+
+ return 0;
+}
+
+static void
+wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
+{
+ u32 tmp_reg;
+
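+ /* Fold trivial immediates: AND 0 and OR ~0 force a constant;
+ * AND ~0, OR 0 and XOR 0 are no-ops; XOR ~0 is a bitwise invert,
+ * emitted as ALU_OP_NEG.
+ */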
+ if (alu_op == ALU_OP_AND) {
+ if (!imm)
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ if (!imm || !~imm)
+ return;
+ }
+ if (alu_op == ALU_OP_OR) {
+ if (!~imm)
+ wrp_immed(nfp_prog, reg_both(dst), ~0U);
+ if (!imm || !~imm)
+ return;
+ }
+ if (alu_op == ALU_OP_XOR) {
+ if (!~imm)
+ emit_alu(nfp_prog, reg_both(dst), reg_none(),
+ ALU_OP_NEG, reg_b(dst));
+ if (!imm || !~imm)
+ return;
+ }
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
+}
+
+static int
+wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, bool skip)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ if (skip) {
+ meta->skip = true;
+ return 0;
+ }
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);
+
+ return 0;
+}
+
+static int
+wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op)
+{
+ u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
+ emit_alu(nfp_prog, reg_both(dst + 1),
+ reg_a(dst + 1), alu_op, reg_b(src + 1));
+
+ return 0;
+}
+
+static int
+wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, bool skip)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (skip) {
+ meta->skip = true;
+ return 0;
+ }
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int
+wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op)
+{
+ u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static void
+wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
+ enum br_mask br_mask, u16 off)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
+ emit_br(nfp_prog, br_mask, off, 0);
+}
+
+static int
+wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, enum br_mask br_mask)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
+ insn->src_reg * 2, br_mask, insn->off);
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
+ insn->src_reg * 2 + 1, br_mask, insn->off);
+
+ return 0;
+}
+
+static int
+wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum br_mask br_mask, bool swap)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u8 reg = insn->dst_reg * 2;
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ if (!swap)
+ emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ if (!swap)
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(),
+ tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+
+ emit_br(nfp_prog, br_mask, insn->off, 0);
+
+ return 0;
+}
+
+static int
+wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum br_mask br_mask, bool swap)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
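+ /* Three-XOR swap of the operand register numbers. */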
+ if (swap) {
+ areg ^= breg;
+ breg ^= areg;
+ areg ^= breg;
+ }
+
+ emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
+ emit_br(nfp_prog, br_mask, insn->off, 0);
+
+ return 0;
+}
+
+/* --- Callbacks --- */
+static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);
+
+ return 0;
+}
+
+static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u64 imm = meta->insn.imm; /* sign extend */
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
+
+ return 0;
+}
+
+static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
+}
+
+static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
+}
+
+static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
+}
+
+static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+}
+
+static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
+}
+
+static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+}
+
+static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_a(insn->dst_reg * 2), ALU_OP_ADD,
+ reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
+ reg_b(insn->src_reg * 2 + 1));
+
+ return 0;
+}
+
+static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
+
+ return 0;
+}
+
+static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_a(insn->dst_reg * 2), ALU_OP_SUB,
+ reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
+ reg_b(insn->src_reg * 2 + 1));
+
+ return 0;
+}
+
+static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
+
+ return 0;
+}
+
+static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->imm != 32)
+ return 1; /* TODO */
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);
+
+ return 0;
+}
+
+static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->imm != 32)
+ return 1; /* TODO */
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
+}
+
+static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+}
+
+static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
+}
+
+static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+}
+
+static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
+}
+
+static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+}
+
+static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
+}
+
+static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+}
+
+static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
+}
+
+static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+}
+
+static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (!insn->imm)
+ return 1; /* TODO: zero shift means indirect */
+
+ emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
+ SHF_SC_L_SHF, insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
+ meta->insn.imm);
+
+ return 0;
+}
+
+static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ meta->double_cb = imm_ld8_part2;
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
+
+ return 0;
+}
+
+static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 1);
+}
+
+static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 2);
+}
+
+static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 4);
+}
+
+static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 1);
+}
+
+static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 2);
+}
+
+static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 4);
+}
+
+static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off == offsetof(struct sk_buff, len))
+ emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
+ else
+ return -ENOTSUPP;
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off == offsetof(struct sk_buff, mark))
+ return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
+
+ return -ENOTSUPP;
+}
+
+static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off < 0) /* TODO */
+ return -ENOTSUPP;
+ emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
+
+ return 0;
+}
+
+static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
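+ /* 64-bit equality: XOR each half with the immediate, OR the results
+ * and branch if zero.  A zero half needs no XOR - the register
+ * itself is its own comparison result.
+ */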
+ if (imm & ~0U) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ or1 = imm_a(nfp_prog);
+ }
+
+ if (imm >> 32) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
+ or2 = imm_b(nfp_prog);
+ }
+
+ emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
+static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
+}
+
+static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
+}
+
+static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (!imm) {
+ meta->skip = true;
+ return 0;
+ }
+
+ if (imm & ~0U) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ if (imm >> 32) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ return 0;
+}
+
+static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (!imm) {
+ emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
+ ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ return 0;
+ }
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+
+ return 0;
+}
+
+static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
+ ALU_OP_XOR, reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
+ ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
+ emit_alu(nfp_prog, reg_none(),
+ imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
+static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
+}
+
+static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
+}
+
+static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
+}
+
+static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
+}
+
+static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+
+ return 0;
+}
+
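+/* Translator dispatch table, indexed directly by the 8-bit BPF opcode. */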
+static const instr_cb_t instr_cb[256] = {
+ [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
+ [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
+ [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
+ [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
+ [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
+ [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
+ [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
+ [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
+ [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
+ [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
+ [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
+ [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
+ [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
+ [BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
+ [BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
+ [BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
+ [BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
+ [BPF_ALU | BPF_AND | BPF_X] = and_reg,
+ [BPF_ALU | BPF_AND | BPF_K] = and_imm,
+ [BPF_ALU | BPF_OR | BPF_X] = or_reg,
+ [BPF_ALU | BPF_OR | BPF_K] = or_imm,
+ [BPF_ALU | BPF_ADD | BPF_X] = add_reg,
+ [BPF_ALU | BPF_ADD | BPF_K] = add_imm,
+ [BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
+ [BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
+ [BPF_LD | BPF_ABS | BPF_B] = data_ld1,
+ [BPF_LD | BPF_ABS | BPF_H] = data_ld2,
+ [BPF_LD | BPF_ABS | BPF_W] = data_ld4,
+ [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
+ [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
+ [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
+ [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
+ [BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
+ [BPF_JMP | BPF_JA | BPF_K] = jump,
+ [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
+ [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
+ [BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
+ [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
+ [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
+ [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
+ [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
+ [BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
+ [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
+ [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP | BPF_EXIT] = goto_out,
+};
+
+/* --- Misc code --- */
+static void br_set_offset(u64 *instr, u16 offset)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = offset != addr_lo;
+ *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+ *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+ *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+/* --- Assembler logic --- */
+static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta, *next;
+ u32 off, br_idx;
+ u32 idx;
+
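+ /* BPF jump offsets count eBPF instructions; walk the meta list to
+ * the jump target and patch every branch emitted for this
+ * instruction with the target's NFP offset.
+ */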
+ nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+ if (meta->skip)
+ continue;
+ if (BPF_CLASS(meta->insn.code) != BPF_JMP)
+ continue;
+
+ br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+ if (!nfp_is_br(nfp_prog->prog[br_idx])) {
+ pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
+ br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
+ return -ELOOP;
+ }
+ /* Leave special branches for later */
+ if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+ continue;
+
+ /* Find the target offset in assembler realm */
+ off = meta->insn.off;
+ if (!off) {
+ pr_err("Fixup found zero offset!!\n");
+ return -ELOOP;
+ }
+
+ while (off && nfp_meta_has_next(nfp_prog, next)) {
+ next = nfp_meta_next(next);
+ off--;
+ }
+ if (off) {
+ pr_err("Fixup found too large jump!! %d\n", off);
+ return -ELOOP;
+ }
+
+ if (next->skip) {
+ pr_err("Branch landing on removed instruction!!\n");
+ return -ELOOP;
+ }
+
+ for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
+ idx <= br_idx; idx++) {
+ if (!nfp_is_br(nfp_prog->prog[idx]))
+ continue;
+ br_set_offset(&nfp_prog->prog[idx], next->off);
+ }
+ }
+
+ /* Fixup 'goto out's separately, they can be scattered around */
+ for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
+ enum br_special special;
+
+ if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
+ continue;
+
+ special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
+ switch (special) {
+ case OP_BR_NORMAL:
+ break;
+ case OP_BR_GO_OUT:
+ br_set_offset(&nfp_prog->prog[br_idx],
+ nfp_prog->tgt_out);
+ break;
+ case OP_BR_GO_ABORT:
+ br_set_offset(&nfp_prog->prog[br_idx],
+ nfp_prog->tgt_abort);
+ break;
+ }
+
+ nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
+ }
+
+ return 0;
+}
+
+static void nfp_intro(struct nfp_prog *nfp_prog)
+{
+ emit_alu(nfp_prog, pkt_reg(nfp_prog),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+}
+
+static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
+{
+ const u8 act2code[] = {
+ [NN_ACT_TC_DROP] = 0x22,
+ [NN_ACT_TC_REDIR] = 0x24
+ };
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+ wrp_immed(nfp_prog, reg_both(0), 0);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+ /* Legacy TC mode:
+ * 0 0x11 -> pass, count as stat0
+ * -1 drop 0x22 -> drop, count as stat1
+ * redir 0x24 -> redir, count as stat1
+ * ife mark 0x21 -> pass, count as stat1
+ * ife + tx 0x24 -> redir, count as stat1
+ */
+ emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
+
+ emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
+ SHF_SC_L_SHF, 16);
+}
+
+static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
+{
+ /* TC direct-action mode:
+ * 0,1 ok NOT SUPPORTED[1]
+ * 2 drop 0x22 -> drop, count as stat1
+ * 4,5 nuke 0x02 -> drop
+ * 7 redir 0x44 -> redir, count as stat2
+ * * unspec 0x11 -> pass, count as stat0
+ *
+ * [1] We can't support OK and RECLASSIFY because we can't tell TC
+ * the exact decision made. We are forced to support UNSPEC
+ * to handle aborts, so that's the only verdict we handle for
+ * passing packets up the stack.
+ */
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+
+ /* if R0 > 7 jump to abort */
+ emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
+ emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+
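+ /* Nibble lookup tables: each verdict 0-7 selects a 4-bit action code
+ * from these constants, extracted below by shifting right by R0 * 4
+ * (the zero-shift SHF takes its amount from the preceding ALU
+ * result).
+ */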
+ wrp_immed(nfp_prog, reg_b(2), 0x41221211);
+ wrp_immed(nfp_prog, reg_b(3), 0x41001211);
+
+ emit_shf(nfp_prog, reg_a(1),
+ reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_a(2),
+ reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_b(2),
+ reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_shf(nfp_prog, reg_b(2),
+ reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
+}
+
+static void nfp_outro(struct nfp_prog *nfp_prog)
+{
+ switch (nfp_prog->act) {
+ case NN_ACT_DIRECT:
+ nfp_outro_tc_da(nfp_prog);
+ break;
+ case NN_ACT_TC_DROP:
+ case NN_ACT_TC_REDIR:
+ nfp_outro_tc_legacy(nfp_prog);
+ break;
+ }
+}
+
+static int nfp_translate(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+ int err;
+
+ nfp_intro(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ instr_cb_t cb = instr_cb[meta->insn.code];
+
+ meta->off = nfp_prog_current_offset(nfp_prog);
+
+ if (meta->skip) {
+ nfp_prog->n_translated++;
+ continue;
+ }
+
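+ /* BPF_LD | BPF_IMM | BPF_DW spans two bpf_insn slots; the first
+ * half installs double_cb to translate the second.
+ */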
+ if (nfp_meta_has_prev(nfp_prog, meta) &&
+ nfp_meta_prev(meta)->double_cb)
+ cb = nfp_meta_prev(meta)->double_cb;
+ if (!cb)
+ return -ENOENT;
+ err = cb(nfp_prog, meta);
+ if (err)
+ return err;
+
+ nfp_prog->n_translated++;
+ }
+
+ nfp_outro(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
+ return nfp_fixup_branches(nfp_prog);
+}
+
+static int
+nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
+ unsigned int cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct nfp_insn_meta *meta;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->insn = prog[i];
+ meta->n = i;
+
+ list_add_tail(&meta->l, &nfp_prog->insns);
+ }
+
+ return 0;
+}
+
+/* --- Optimizations --- */
+static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct bpf_insn insn = meta->insn;
+
+ /* Programs converted from cBPF start with register xoring */
+ if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
+ insn.src_reg == insn.dst_reg)
+ continue;
+
+ /* Programs start with R6 = R1 but we ignore the skb pointer */
+ if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn.src_reg == 1 && insn.dst_reg == 6)
+ meta->skip = true;
+
+ /* Return as soon as something doesn't match */
+ if (!meta->skip)
+ return;
+ }
+}
+
+/* Try to rename registers so that program uses only low ones */
+static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
+{
+ bool reg_used[MAX_BPF_REG] = {};
+ u8 tgt_reg[MAX_BPF_REG] = {};
+ struct nfp_insn_meta *meta;
+ unsigned int i, j;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (meta->skip)
+ continue;
+
+ reg_used[meta->insn.src_reg] = true;
+ reg_used[meta->insn.dst_reg] = true;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
+ if (!reg_used[i])
+ continue;
+
+ tgt_reg[i] = j++;
+ }
+ nfp_prog->num_regs = j;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
+ meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
+ }
+
+ return 0;
+}
+
+/* Remove masking after load since our load guarantees this is not needed */
+static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2;
+ const s32 exp_mask[] = {
+ [BPF_B] = 0x000000ffU,
+ [BPF_H] = 0x0000ffffU,
+ [BPF_W] = 0xffffffffU,
+ };
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ struct bpf_insn insn, next;
+
+ insn = meta1->insn;
+ next = meta2->insn;
+
+ if (BPF_CLASS(insn.code) != BPF_LD)
+ continue;
+ if (BPF_MODE(insn.code) != BPF_ABS &&
+ BPF_MODE(insn.code) != BPF_IND)
+ continue;
+
+ if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
+ continue;
+
+ if (!exp_mask[BPF_SIZE(insn.code)])
+ continue;
+ if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
+ continue;
+
+ if (next.src_reg || next.dst_reg)
+ continue;
+
+ meta2->skip = true;
+ }
+}
+
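+/* Remove the <<32/>>32 shift pairs after 32-bit loads since our load
+ * already leaves the upper word of the register pair in the required
+ * state.
+ */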
+static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2, *meta3;
+
+ nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
+ struct bpf_insn insn, next1, next2;
+
+ insn = meta1->insn;
+ next1 = meta2->insn;
+ next2 = meta3->insn;
+
+ if (BPF_CLASS(insn.code) != BPF_LD)
+ continue;
+ if (BPF_MODE(insn.code) != BPF_ABS &&
+ BPF_MODE(insn.code) != BPF_IND)
+ continue;
+ if (BPF_SIZE(insn.code) != BPF_W)
+ continue;
+
+ if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
+ next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
+ !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
+ next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
+ continue;
+
+ if (next1.src_reg || next1.dst_reg ||
+ next2.src_reg || next2.dst_reg)
+ continue;
+
+ if (next1.imm != 0x20 || next2.imm != 0x20)
+ continue;
+
+ meta2->skip = true;
+ meta3->skip = true;
+ }
+}
+
+static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
+{
+ int ret;
+
+ nfp_bpf_opt_reg_init(nfp_prog);
+
+ ret = nfp_bpf_opt_reg_rename(nfp_prog);
+ if (ret)
+ return ret;
+
+ nfp_bpf_opt_ld_mask(nfp_prog);
+ nfp_bpf_opt_ld_shift(nfp_prog);
+
+ return 0;
+}
+
+/**
+ * nfp_bpf_jit() - translate BPF code into NFP assembly
+ * @filter: kernel BPF filter struct
+ * @prog_mem: memory to store assembler instructions
+ * @act: action attached to this eBPF program
+ * @prog_start: offset of the first instruction when loaded
+ * @prog_done: where to jump on exit
+ * @prog_sz: size of @prog_mem in instructions
+ * @res: resulting translation parameters (instruction count, register mode)
+ */
+int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
+ enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res)
+{
+ struct nfp_prog *nfp_prog;
+ int ret;
+
+ nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
+ if (!nfp_prog)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&nfp_prog->insns);
+ nfp_prog->act = act;
+ nfp_prog->start_off = prog_start;
+ nfp_prog->tgt_done = prog_done;
+
+ ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
+ if (ret)
+ goto out;
+
+ ret = nfp_prog_verify(nfp_prog, filter);
+ if (ret)
+ goto out;
+
+ ret = nfp_bpf_optimize(nfp_prog);
+ if (ret)
+ goto out;
+
+ if (nfp_prog->num_regs <= 7)
+ nfp_prog->regs_per_thread = 16;
+ else
+ nfp_prog->regs_per_thread = 32;
+
+ nfp_prog->prog = prog_mem;
+ nfp_prog->__prog_alloc_len = prog_sz;
+
+ ret = nfp_translate(nfp_prog);
+ if (ret) {
+ pr_err("Translation failed with error %d (translated: %u)\n",
+ ret, nfp_prog->n_translated);
+ ret = -EINVAL;
+ }
+
+ res->n_instr = nfp_prog->prog_len;
+ res->dense_mode = nfp_prog->num_regs <= 7;
+out:
+ nfp_prog_free(nfp_prog);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
new file mode 100644
index 000000000000..144cae87f63a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
+#include <linux/pkt_cls.h>
+
+#include "nfp_bpf.h"
+
+/* Analyzer/verifier definitions */
+struct nfp_bpf_analyzer_priv {
+ struct nfp_prog *prog;
+ struct nfp_insn_meta *meta;
+};
+
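+/* Seek to the meta for @insn_idx, walking the list from whichever point
+ * is closest: the last visited meta, the list head, or the list tail.
+ */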
+static struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int insn_idx, unsigned int n_insns)
+{
+ unsigned int forward, backward, i;
+
+ backward = meta->n - insn_idx;
+ forward = insn_idx - meta->n;
+
+ if (min(forward, backward) > n_insns - insn_idx - 1) {
+ backward = n_insns - insn_idx - 1;
+ meta = nfp_prog_last_meta(nfp_prog);
+ }
+ if (min(forward, backward) > insn_idx && backward > insn_idx) {
+ forward = insn_idx;
+ meta = nfp_prog_first_meta(nfp_prog);
+ }
+
+ if (forward < backward)
+ for (i = 0; i < forward; i++)
+ meta = nfp_meta_next(meta);
+ else
+ for (i = 0; i < backward; i++)
+ meta = nfp_meta_prev(meta);
+
+ return meta;
+}
+
+static int
+nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
+ const struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
+
+ if (reg0->type != CONST_IMM) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ if (nfp_prog->act != NN_ACT_DIRECT &&
+ reg0->imm != 0 && (reg0->imm & ~0U) != ~0U) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ if (nfp_prog->act == NN_ACT_DIRECT && reg0->imm < TC_ACT_REDIRECT &&
+ reg0->imm != TC_ACT_SHOT && reg0->imm != TC_ACT_STOLEN &&
+ reg0->imm != TC_ACT_QUEUED) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
+ const struct bpf_verifier_env *env, u8 reg)
+{
+ if (env->cur_state.regs[reg].type != PTR_TO_CTX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+{
+ struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
+ struct nfp_insn_meta *meta = priv->meta;
+
+ meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
+ priv->meta = meta;
+
+ if (meta->insn.src_reg == BPF_REG_10 ||
+ meta->insn.dst_reg == BPF_REG_10) {
+ pr_err("stack not yet supported\n");
+ return -EINVAL;
+ }
+ if (meta->insn.src_reg >= MAX_BPF_REG ||
+ meta->insn.dst_reg >= MAX_BPF_REG) {
+ pr_err("program uses extended registers - jit hardening?\n");
+ return -EINVAL;
+ }
+
+ if (meta->insn.code == (BPF_JMP | BPF_EXIT))
+ return nfp_bpf_check_exit(priv->prog, env);
+
+ if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+ return nfp_bpf_check_ctx_ptr(priv->prog, env,
+ meta->insn.src_reg);
+ if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+ return nfp_bpf_check_ctx_ptr(priv->prog, env,
+ meta->insn.dst_reg);
+
+ return 0;
+}
+
+static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+ .insn_hook = nfp_verify_insn,
+};
+
+int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
+{
+ struct nfp_bpf_analyzer_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->prog = nfp_prog;
+ priv->meta = nfp_prog_first_meta(nfp_prog);
+
+ ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
+
+ kfree(priv);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 690635660195..ed824e11a1e3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -62,6 +62,9 @@
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT 5
+/* Interval for reading offloaded filter stats */
+#define NFP_NET_STAT_POLL_IVL msecs_to_jiffies(100)
+
/* Bar allocation */
#define NFP_NET_CTRL_BAR 0
#define NFP_NET_Q0_BAR 2
@@ -220,7 +223,7 @@ struct nfp_net_tx_ring {
#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_SPARE cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_BPF cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5))
@@ -266,6 +269,8 @@ struct nfp_net_rx_desc {
};
};
+#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+
struct nfp_net_rx_hash {
__be32 hash_type;
__be32 hash;
@@ -405,6 +410,11 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
fw_ver->minor == minor;
}
+struct nfp_stat_pair {
+ u64 pkts;
+ u64 bytes;
+};
+
/**
* struct nfp_net - NFP network device structure
* @pdev: Backpointer to PCI device
@@ -413,6 +423,7 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @is_vf: Is the driver attached to a VF?
* @is_nfp3200: Is the driver for a NFP-3200 card?
* @fw_loaded: Is the firmware loaded?
+ * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -427,6 +438,11 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
+ * @rx_filter: Filter offload statistics - dropped packets/bytes
+ * @rx_filter_prev: Filter offload statistics - values from previous update
+ * @rx_filter_change: Jiffies when statistics last changed
+ * @rx_filter_stats_timer: Timer for polling filter offload statistics
+ * @rx_filter_lock: Lock protecting timer state changes (teardown)
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
* @num_tx_rings: Currently configured number of TX rings
@@ -473,6 +489,7 @@ struct nfp_net {
unsigned is_vf:1;
unsigned is_nfp3200:1;
unsigned fw_loaded:1;
+ unsigned bpf_offload_skip_sw:1;
u32 ctrl;
u32 fl_bufsz;
@@ -502,6 +519,11 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
+ struct nfp_stat_pair rx_filter, rx_filter_prev;
+ unsigned long rx_filter_change;
+ struct timer_list rx_filter_stats_timer;
+ spinlock_t rx_filter_lock;
+
int max_tx_rings;
int max_rx_rings;
@@ -561,12 +583,28 @@ struct nfp_net {
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
+static inline u8 nn_readb(struct nfp_net *nn, int off)
+{
+ return readb(nn->ctrl_bar + off);
+}
+
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
writeb(val, nn->ctrl_bar + off);
}
-/* NFP-3200 can't handle 16-bit accesses too well - hence no readw/writew */
+/* NFP-3200 can't handle 16-bit accesses too well */
+static inline u16 nn_readw(struct nfp_net *nn, int off)
+{
+ WARN_ON_ONCE(nn->is_nfp3200);
+ return readw(nn->ctrl_bar + off);
+}
+
+static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
+{
+ WARN_ON_ONCE(nn->is_nfp3200);
+ writew(val, nn->ctrl_bar + off);
+}
static inline u32 nn_readl(struct nfp_net *nn, int off)
{
@@ -757,4 +795,9 @@ static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
}
#endif /* CONFIG_NFP_NET_DEBUG */
+void nfp_net_filter_stats_timer(unsigned long data);
+int
+nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
+ struct tc_cls_bpf_offload *cls_bpf);
+
#endif /* _NFP_NET_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 252e4924de0f..aee3fd2b6538 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -60,6 +60,7 @@
#include <linux/ktime.h>
+#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include "nfp_net_ctrl.h"
@@ -1292,38 +1293,72 @@ static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
}
}
-/**
- * nfp_net_set_hash() - Set SKB hash data
- * @netdev: adapter's net_device structure
- * @skb: SKB to set the hash data on
- * @rxd: RX descriptor
- *
- * The RSS hash and hash-type are pre-pended to the packet data.
- * Extract and decode it and set the skb fields.
- */
static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
- struct nfp_net_rx_desc *rxd)
+ unsigned int type, __be32 *hash)
{
- struct nfp_net_rx_hash *rx_hash;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS) ||
- !(netdev->features & NETIF_F_RXHASH))
+ if (!(netdev->features & NETIF_F_RXHASH))
return;
- rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
-
- switch (be32_to_cpu(rx_hash->hash_type)) {
+ switch (type) {
case NFP_NET_RSS_IPV4:
case NFP_NET_RSS_IPV6:
case NFP_NET_RSS_IPV6_EX:
- skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L3);
+ skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
break;
default:
- skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L4);
+ skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
break;
}
}
+static void
+nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
+ struct nfp_net_rx_desc *rxd)
+{
+ struct nfp_net_rx_hash *rx_hash;
+
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
+
+ nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
+ &rx_hash->hash);
+}
+
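+/* RX metadata is prepended to the packet: a be32 word of 4-bit
+ * field-type tags (consumed lowest nibble first; the hash tag is
+ * followed by a hash-type nibble) and one 32-bit data word per field.
+ * Returns a pointer just past the metadata - which must land exactly
+ * on skb->data - or NULL on an unknown field type.
+ */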
+static void *
+nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
+ int meta_len)
+{
+ u8 *data = skb->data - meta_len;
+ u32 meta_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_net_set_hash(netdev, skb,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ skb->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ default:
+ return NULL;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data;
+}
+
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1438,14 +1473,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb_reserve(skb, nn->rx_offset);
skb_put(skb, data_len - meta_len);
- nfp_net_set_hash(nn->netdev, skb, rxd);
-
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
r_vec->rx_bytes += skb->len;
u64_stats_update_end(&r_vec->rx_sync);
+ if (nn->fw_ver.major <= 3) {
+ nfp_net_set_hash_desc(nn->netdev, skb, rxd);
+ } else if (meta_len) {
+ void *end;
+
+ end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
+ if (unlikely(end != skb->data)) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ dev_kfree_skb_any(skb);
+ nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
+ continue;
+ }
+ }
+
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, nn->netdev);
@@ -2044,12 +2094,16 @@ static int nfp_net_netdev_open(struct net_device *netdev)
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
- if (!nn->rx_rings)
+ if (!nn->rx_rings) {
+ err = -ENOMEM;
goto err_free_lsc;
+ }
nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
GFP_KERNEL);
- if (!nn->tx_rings)
+ if (!nn->tx_rings) {
+ err = -ENOMEM;
goto err_free_rx_rings;
+ }
for (r = 0; r < nn->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
@@ -2382,6 +2436,31 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
return stats;
}
+static bool nfp_net_ebpf_capable(struct nfp_net *nn)
+{
+ if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
+ nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
+ return true;
+ return false;
+}
+
+static int
+nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -ENOTSUPP;
+ if (proto != htons(ETH_P_ALL))
+ return -ENOTSUPP;
+
+ if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
+ return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);
+
+ return -EINVAL;
+}
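The two early guards reduce to constant comparisons. A standalone sketch, with the handle macros taken from include/uapi/linux/pkt_sched.h, shows that only classifiers attached to the ingress qdisc get through:

    #include <stdint.h>
    #include <stdio.h>

    #define TC_H_MAJ_MASK 0xFFFF0000U
    #define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)
    #define TC_H_INGRESS  0xFFFFFFF1U

    int main(void)
    {
        uint32_t ingress = TC_H_INGRESS;   /* filter on the ingress qdisc */
        uint32_t egress_root = 0x00010000; /* hypothetical egress handle */

        printf("ingress passes: %d\n",
               TC_H_MAJ(ingress) == TC_H_MAJ(TC_H_INGRESS));     /* 1 */
        printf("egress passes:  %d\n",
               TC_H_MAJ(egress_root) == TC_H_MAJ(TC_H_INGRESS)); /* 0 */
        return 0;
    }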
+
static int nfp_net_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2436,6 +2515,11 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
+ if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ nn_err(nn, "Cannot disable HW TC offload while in use\n");
+ return -EBUSY;
+ }
+
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
netdev->features, features, changed);
@@ -2585,6 +2669,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
+ .ndo_setup_tc = nfp_net_setup_tc,
.ndo_tx_timeout = nfp_net_tx_timeout,
.ndo_set_rx_mode = nfp_net_set_rx_mode,
.ndo_change_mtu = nfp_net_change_mtu,
@@ -2610,7 +2695,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2627,7 +2712,8 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
- nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "");
+ nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
+ nfp_net_ebpf_capable(nn) ? "BPF " : "");
}
/**
@@ -2670,10 +2756,13 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
+ spin_lock_init(&nn->rx_filter_lock);
spin_lock_init(&nn->link_status_lock);
setup_timer(&nn->reconfig_timer,
nfp_net_reconfig_timer, (unsigned long)nn);
+ setup_timer(&nn->rx_filter_stats_timer,
+ nfp_net_filter_stats_timer, (unsigned long)nn);
return nn;
}
@@ -2795,6 +2884,9 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->features = netdev->hw_features;
+ if (nfp_net_ebpf_capable(nn))
+ netdev->hw_features |= NETIF_F_HW_TC;
+
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ad6c4e31cedd..93b10b441acb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -66,6 +66,13 @@
#define NFP_NET_LSO_MAX_HDR_SZ 255
/**
+ * Prepend field types
+ */
+#define NFP_NET_META_FIELD_SIZE 4
+#define NFP_NET_META_HASH 1 /* next field carries hash type */
+#define NFP_NET_META_MARK 2
+
+/**
 * Hash type pre-pended when an RSS hash was computed
*/
#define NFP_NET_RSS_NONE 0
@@ -123,6 +130,7 @@
#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
+#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -134,6 +142,7 @@
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
+#define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -196,10 +205,37 @@
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
- * 64B reserved for future use (0x0080 - 0x00c0)
+ * NFP6000 - BPF section
+ * @NFP_NET_CFG_BPF_ABI: BPF ABI version
+ * @NFP_NET_CFG_BPF_CAP: BPF capabilities
+ * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
+ * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
+ * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
+ * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
+ * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
+ * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
+ * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
-#define NFP_NET_CFG_RESERVED 0x0080
-#define NFP_NET_CFG_RESERVED_SZ 0x0040
+#define NFP_NET_CFG_BPF_ABI 0x0080
+#define NFP_NET_BPF_ABI 1
+#define NFP_NET_CFG_BPF_CAP 0x0081
+#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
+#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
+#define NFP_NET_CFG_BPF_START 0x0084
+#define NFP_NET_CFG_BPF_DONE 0x0086
+#define NFP_NET_CFG_BPF_STACK_SZ 0x0088
+#define NFP_NET_CFG_BPF_INL_MTU 0x0089
+#define NFP_NET_CFG_BPF_SIZE 0x008e
+#define NFP_NET_CFG_BPF_ADDR 0x0090
+#define NFP_NET_CFG_BPF_CFG_8CTX (1 << 0) /* 8ctx mode */
+#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
+#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
+
+/**
+ * 40B reserved for future use (0x0098 - 0x00c0)
+ */
+#define NFP_NET_CFG_RESERVED 0x0098
+#define NFP_NET_CFG_RESERVED_SZ 0x0028
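Because NFP_NET_CFG_BPF_ADDR doubles as a small config word, the DMA address written there must leave its low three bits clear for flags. A sketch of the packing implied by the masks above (the buffer address is a made-up example):

    #include <stdint.h>
    #include <stdio.h>

    #define CFG_8CTX  (1ULL << 0)   /* NFP_NET_CFG_BPF_CFG_8CTX */
    #define CFG_MASK  7ULL          /* NFP_NET_CFG_BPF_CFG_MASK */
    #define ADDR_MASK (~CFG_MASK)   /* NFP_NET_CFG_BPF_ADDR_MASK */

    /* dma_addr is assumed 8-byte aligned, so the flag bits are free */
    static uint64_t bpf_addr_word(uint64_t dma_addr, int dense_mode)
    {
        return (dma_addr & ADDR_MASK) | (dense_mode ? CFG_8CTX : 0);
    }

    int main(void)
    {
        printf("0x%llx\n",
               (unsigned long long)bpf_addr_word(0x12340000ULL, 1));
        return 0;
    }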
/**
* RSS configuration (0x0100 - 0x01ac):
@@ -303,6 +339,15 @@
#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80)
#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88)
+#define NFP_NET_CFG_STATS_APP0_FRAMES (NFP_NET_CFG_STATS_BASE + 0x90)
+#define NFP_NET_CFG_STATS_APP0_BYTES (NFP_NET_CFG_STATS_BASE + 0x98)
+#define NFP_NET_CFG_STATS_APP1_FRAMES (NFP_NET_CFG_STATS_BASE + 0xa0)
+#define NFP_NET_CFG_STATS_APP1_BYTES (NFP_NET_CFG_STATS_BASE + 0xa8)
+#define NFP_NET_CFG_STATS_APP2_FRAMES (NFP_NET_CFG_STATS_BASE + 0xb0)
+#define NFP_NET_CFG_STATS_APP2_BYTES (NFP_NET_CFG_STATS_BASE + 0xb8)
+#define NFP_NET_CFG_STATS_APP3_FRAMES (NFP_NET_CFG_STATS_BASE + 0xc0)
+#define NFP_NET_CFG_STATS_APP3_BYTES (NFP_NET_CFG_STATS_BASE + 0xc8)
+
/**
* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 4c9897220969..3418f2277e9d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -106,6 +106,18 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
{"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
{"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
{"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},
+
+ {"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)},
+ {"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)},
+ /* See the comments on the outro functions in nfp_bpf_jit.c for how
+ * the different BPF modes use the app-specific counters
+ */
+ {"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)},
+ {"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)},
+ {"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)},
+ {"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)},
+ {"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)},
+ {"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)},
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
new file mode 100644
index 000000000000..8acfb631a0ea
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_net_offload.c
+ * Netronome network device driver: TC offload functions for PF and VF
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "nfp_bpf.h"
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
+void nfp_net_filter_stats_timer(unsigned long data)
+{
+ struct nfp_net *nn = (void *)data;
+ struct nfp_stat_pair latest;
+
+ spin_lock_bh(&nn->rx_filter_lock);
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ mod_timer(&nn->rx_filter_stats_timer,
+ jiffies + NFP_NET_STAT_POLL_IVL);
+
+ spin_unlock_bh(&nn->rx_filter_lock);
+
+ latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+
+ if (latest.pkts != nn->rx_filter.pkts)
+ nn->rx_filter_change = jiffies;
+
+ nn->rx_filter = latest;
+}
+
+static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
+{
+ nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+ nn->rx_filter_prev = nn->rx_filter;
+ nn->rx_filter_change = jiffies;
+}
+
+static int
+nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+{
+ struct tc_action *a;
+ LIST_HEAD(actions);
+ u64 bytes, pkts;
+
+ pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
+ bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
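+ /* Hardware byte counters include the Ethernet header; subtract it so
+ * the numbers line up with software TC stats, which are taken from
+ * skb->len after the MAC header has been pulled.
+ */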
+ bytes -= pkts * ETH_HLEN;
+
+ nn->rx_filter_prev = nn->rx_filter;
+
+ preempt_disable();
+
+ tcf_exts_to_list(cls_bpf->exts, &actions);
+ list_for_each_entry(a, &actions, list)
+ tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);
+
+ preempt_enable();
+
+ return 0;
+}
+
+static int
+nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ /* TC direct action */
+ if (cls_bpf->exts_integrated) {
+ if (tc_no_actions(cls_bpf->exts))
+ return NN_ACT_DIRECT;
+
+ return -ENOTSUPP;
+ }
+
+ /* TC legacy mode */
+ if (!tc_single_action(cls_bpf->exts))
+ return -ENOTSUPP;
+
+ tcf_exts_to_list(cls_bpf->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_shot(a))
+ return NN_ACT_TC_DROP;
+
+ if (is_tcf_mirred_redirect(a) &&
+ tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+ return NN_ACT_TC_REDIR;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int
+nfp_net_bpf_offload_prepare(struct nfp_net *nn,
+ struct tc_cls_bpf_offload *cls_bpf,
+ struct nfp_bpf_result *res,
+ void **code, dma_addr_t *dma_addr, u16 max_instr)
+{
+ unsigned int code_sz = max_instr * sizeof(u64);
+ enum nfp_bpf_action_type act;
+ u16 start_off, done_off;
+ unsigned int max_mtu;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return -ENOTSUPP;
+
+ ret = nfp_net_bpf_get_act(nn, cls_bpf);
+ if (ret < 0)
+ return ret;
+ act = ret;
+
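+ /* NFP_NET_CFG_BPF_INL_MTU is expressed in 64B chunks (see
+ * nfp_net_ctrl.h); the 32 bytes are assumed to be headroom reserved
+ * for the packet prepend.
+ */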
+ max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ if (max_mtu < nn->netdev->mtu) {
+ nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+ return -ENOTSUPP;
+ }
+
+ start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
+ *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
+ GFP_KERNEL);
+ if (!*code)
+ return -ENOMEM;
+
+ ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
+ max_instr, res);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
+ return ret;
+}
+
+static void
+nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
+ void *code, dma_addr_t dma_addr,
+ unsigned int code_sz, unsigned int n_instr,
+ bool dense_mode)
+{
+ u64 bpf_addr = dma_addr;
+ int err;
+
+ nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+
+ if (dense_mode)
+ bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
+
+ nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
+ nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);
+
+ /* Load up the JITed code */
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
+ if (err)
+ nn_err(nn, "FW command error while loading BPF: %d\n", err);
+
+ /* Enable passing packets through BPF function */
+ nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (err)
+ nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+
+ dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);
+
+ nfp_net_bpf_stats_reset(nn);
+ mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
+}
+
+static int nfp_net_bpf_stop(struct nfp_net *nn)
+{
+ if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+ return 0;
+
+ spin_lock_bh(&nn->rx_filter_lock);
+ nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+ spin_unlock_bh(&nn->rx_filter_lock);
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+
+ del_timer_sync(&nn->rx_filter_stats_timer);
+ nn->bpf_offload_skip_sw = 0;
+
+ return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+}
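Note the teardown ordering in nfp_net_bpf_stop(): the CTRL_BPF bit is cleared under rx_filter_lock, so a concurrently executing nfp_net_filter_stats_timer() (which re-arms itself only while the bit is set) cannot schedule itself again, and del_timer_sync() can then wait out any in-flight callback. The generic shape of this handshake, as a fragment rather than a complete excerpt:

    /* teardown side (sketch) */
    spin_lock_bh(&lock);
    enabled = false;          /* the timer callback re-arms only while set */
    spin_unlock_bh(&lock);
    del_timer_sync(&timer);   /* no re-arm can slip in after this returns */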
+
+int
+nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
+ struct tc_cls_bpf_offload *cls_bpf)
+{
+ struct nfp_bpf_result res;
+ dma_addr_t dma_addr;
+ u16 max_instr;
+ void *code;
+ int err;
+
+ max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+
+ switch (cls_bpf->command) {
+ case TC_CLSBPF_REPLACE:
+ /* Nothing prevents a seamless replace, but the simple loading
+ * method adopted in the firmware does not handle an atomic
+ * replace (i.e. the BPF offload has to be stopped and then
+ * re-enabled). Letting a few frames through without BPF applied
+ * in hardware should be fine, though, as long as the software
+ * fallback is available.
+ */
+ if (nn->bpf_offload_skip_sw)
+ return -EBUSY;
+
+ err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+ &dma_addr, max_instr);
+ if (err)
+ return err;
+
+ nfp_net_bpf_stop(nn);
+ nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+ dma_addr, max_instr * sizeof(u64),
+ res.n_instr, res.dense_mode);
+ return 0;
+
+ case TC_CLSBPF_ADD:
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ return -EBUSY;
+
+ err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+ &dma_addr, max_instr);
+ if (err)
+ return err;
+
+ nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+ dma_addr, max_instr * sizeof(u64),
+ res.n_instr, res.dense_mode);
+ return 0;
+
+ case TC_CLSBPF_DESTROY:
+ return nfp_net_bpf_stop(nn);
+
+ case TC_CLSBPF_STATS:
+ return nfp_net_bpf_stats_update(nn, cls_bpf);
+
+ default:
+ return -ENOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index f7062cb648e1..2800bbf65a89 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -148,7 +148,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
- case 1 ... 3:
+ case 1 ... 4:
if (is_nfp3200) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6ba48406899e..0df1391f9663 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -88,6 +88,9 @@ config QED
---help---
This enables the support for ...
+config QED_LL2
+ bool
+
config QED_SRIOV
bool "QLogic QED 25/40/100Gb SR-IOV support"
depends on QED && PCI_IOV
@@ -104,4 +107,15 @@ config QEDE
---help---
This enables the support for ...
+config INFINIBAND_QEDR
+ tristate "QLogic qede RoCE sources [debug]"
+ depends on QEDE && 64BIT
+ select QED_LL2
+ default n
+ ---help---
+ This provides a temporary node that allows the compilation
+ and logical testing of the InfiniBand over Ethernet support
+ for QLogic QED. This would be replaced by the 'real' option
+ once the QEDR driver is added [+relocated].
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 86a5b4f5f870..cda0af7fbc20 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -4,3 +4,5 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
+qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 0929582fc82b..653bb5735f0c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -35,6 +35,9 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
+#define QED_WID_SIZE (1024)
+#define QED_PF_DEMS_SIZE (4)
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -50,6 +53,14 @@ enum qed_mcp_protocol_type;
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * QED_PF_DEMS_SIZE);
+
+ return db_addr;
+}
+
+static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
return db_addr;
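The PF/VF split above differs only in where the connection ID lands: the PF variant indexes doorbells linearly at QED_PF_DEMS_SIZE bytes per CID, while the VF variant keeps the CID in its legacy register field. A runnable sketch of the PF arithmetic; the DEMS field shift is a placeholder, the real one comes from the HSI headers:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMS_SHIFT   27    /* placeholder for DB_LEGACY_ADDR_DEMS */
    #define PF_DEMS_SIZE 4     /* QED_PF_DEMS_SIZE */

    static uint32_t db_addr_pf(uint32_t cid, uint32_t dems)
    {
        return (dems << DEMS_SHIFT) | (cid * PF_DEMS_SIZE);
    }

    int main(void)
    {
        /* consecutive CIDs land 4 bytes apart in the normal region */
        printf("cid 0: 0x%08x\ncid 1: 0x%08x\n",
               db_addr_pf(0, 1), db_addr_pf(1, 1));
        return 0;
    }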
@@ -72,6 +83,7 @@ struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
+struct qed_ll2_info;
struct qed_mcp_info;
struct qed_rt_data {
@@ -151,13 +163,17 @@ enum QED_RESOURCES {
QED_RL,
QED_MAC,
QED_VLAN,
+ QED_RDMA_CNQ_RAM,
QED_ILT,
+ QED_LL2_QUEUE,
+ QED_RDMA_STATS_QUEUE,
QED_MAX_RESC,
};
enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
+ QED_RDMA_CNQ,
QED_MAX_FEATURES,
};
@@ -360,6 +376,9 @@ struct qed_hwfn {
struct qed_sb_attn_info *p_sb_attn;
/* Protocol related */
+ bool using_ll2;
+ struct qed_ll2_info *p_ll2_info;
+ struct qed_rdma_info *p_rdma_info;
struct qed_pf_params pf_params;
bool b_rdma_enabled_in_prs;
@@ -398,6 +417,17 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
+ /* PWM region specific data */
+ u32 dpi_size;
+ u32 dpi_count;
+
+ /* This is used to calculate the doorbell address */
+ u32 dpi_start_offset;
+
+ /* If one of the following is set then EDPM shouldn't be used */
+ u8 dcbx_no_edpm;
+ u8 db_bar_no_edpm;
+
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -407,6 +437,7 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
+ struct qed_roce_ll2_info *ll2;
};
struct pci_params {
@@ -431,6 +462,8 @@ struct qed_int_params {
bool fp_initialized;
u8 fp_msix_base;
u8 fp_msix_cnt;
+ u8 rdma_msix_base;
+ u8 rdma_msix_cnt;
};
struct qed_dbg_feature {
@@ -537,7 +570,6 @@ struct qed_dev {
bool b_is_vf;
u32 drv_type;
-
struct qed_eth_stats *reset_stats;
struct qed_fw_data *fw_data;
@@ -564,7 +596,16 @@ struct qed_dev {
struct qed_dbg_params dbg_params;
+#ifdef CONFIG_QED_LL2
+ struct qed_cb_ll2_info *ll2;
+ u8 ll2_mac_address[ETH_ALEN];
+#endif
+
const struct firmware *firmware;
+
+ u32 rdma_max_sge;
+ u32 rdma_max_inline;
+ u32 rdma_max_srq_sge;
};
#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index dd579b2ef224..82370a1a59ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -48,7 +48,13 @@
#define TM_ELEM_SIZE 4
/* ILT constants */
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+/* For RoCE, configure the ILT page size to 64K to cover the RoCE maximum of 256K tasks. */
+#define ILT_DEFAULT_HW_P_SIZE 4
+#else
#define ILT_DEFAULT_HW_P_SIZE 3
+#endif
+
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -1839,6 +1845,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
+ if (p_hwfn->using_ll2)
+ core_cids += 4;
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
switch (p_hwfn->hw_info.personality) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c6f6f2e8192d..2b8bdaa77800 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -170,6 +170,13 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
+int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid);
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
#define QED_CTX_FL_MEM 1
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 13d8b4075b01..754f6a908858 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -29,14 +29,19 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
+#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
+#include "qed_roce.h"
static DEFINE_SPINLOCK(qm_lock);
+#define QED_MIN_DPIS (4)
+#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
+
/* API common to all protocols */
enum BAR_ID {
BAR_ID_0, /* used for GRC */
@@ -147,6 +152,9 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
+#ifdef CONFIG_QED_LL2
+ qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -403,6 +411,9 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
int qed_resc_alloc(struct qed_dev *cdev)
{
+#ifdef CONFIG_QED_LL2
+ struct qed_ll2_info *p_ll2_info;
+#endif
struct qed_consq *p_consq;
struct qed_eq *p_eq;
int i, rc = 0;
@@ -513,6 +524,15 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem;
p_hwfn->p_consq = p_consq;
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2) {
+ p_ll2_info = qed_ll2_alloc(p_hwfn);
+ if (!p_ll2_info)
+ goto alloc_no_mem;
+ p_hwfn->p_ll2_info = p_ll2_info;
+ }
+#endif
+
/* DMA info initialization */
rc = qed_dmae_info_alloc(p_hwfn);
if (rc)
@@ -561,6 +581,10 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2)
+ qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
}
}
@@ -767,6 +791,136 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
return rc;
}
+static int
+qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
+{
+ u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+ u32 dpi_bit_shift, dpi_count;
+ u32 min_dpis;
+
+ /* Calculate DPI size */
+ dpi_page_size_1 = QED_WID_SIZE * n_cpus;
+ dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
+ dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
+ dpi_page_size = roundup_pow_of_two(dpi_page_size);
+ dpi_bit_shift = ilog2(dpi_page_size / 4096);
+
+ dpi_count = pwm_region_size / dpi_page_size;
+
+ min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+ min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
+
+ p_hwfn->dpi_size = dpi_page_size;
+ p_hwfn->dpi_count = dpi_count;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+ if (dpi_count < min_dpis)
+ return -EINVAL;
+
+ return 0;
+}
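A worked example of the sizing above, assuming 4K pages and QED_WID_SIZE = 1024: with 8 active CPUs a DPI must hold max(1024 * 8, max(1024, 4096)) = 8192 bytes, already a power of two, and the shift programmed into the DORQ is ilog2(8192 / 4096) = 1. As a standalone check:

    #include <stdint.h>
    #include <stdio.h>

    #define WID_SIZE 1024u    /* QED_WID_SIZE */
    #define PAGE_SZ  4096u    /* assumed page size */

    static uint32_t roundup_pow2(uint32_t v)
    {
        uint32_t r = 1;

        while (r < v)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        uint32_t n_cpus = 8, pwm_region = 512 * 1024; /* hypothetical */
        uint32_t sz1 = WID_SIZE * n_cpus;             /* one WID per CPU */
        uint32_t sz2 = WID_SIZE > PAGE_SZ ? WID_SIZE : PAGE_SZ;
        uint32_t dpi = roundup_pow2(sz1 > sz2 ? sz1 : sz2);
        uint32_t shift = __builtin_ctz(dpi / PAGE_SZ); /* ilog2 analogue */

        /* prints: dpi_page_size=8192 shift=1 count=64 */
        printf("dpi_page_size=%u shift=%u count=%u\n",
               dpi, shift, pwm_region / dpi);
        return 0;
    }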
+
+enum QED_ROCE_EDPM_MODE {
+ QED_ROCE_EDPM_MODE_ENABLE = 0,
+ QED_ROCE_EDPM_MODE_FORCE_ON = 1,
+ QED_ROCE_EDPM_MODE_DISABLE = 2,
+};
+
+static int
+qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 pwm_regsize, norm_regsize;
+ u32 non_pwm_conn, min_addr_reg1;
+ u32 db_bar_size, n_cpus;
+ u32 roce_edpm_mode;
+ u32 pf_dems_shift;
+ int rc = 0;
+ u8 cond;
+
+ db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+ if (p_hwfn->cdev->num_hwfns > 1)
+ db_bar_size /= 2;
+
+ /* Calculate doorbell regions */
+ non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ NULL) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ NULL);
+ norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ min_addr_reg1 = norm_regsize / 4096;
+ pwm_regsize = db_bar_size - norm_regsize;
+
+ /* Check that the normal and PWM sizes are valid */
+ if (db_bar_size < norm_regsize) {
+ DP_ERR(p_hwfn->cdev,
+ "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+ db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ if (pwm_regsize < QED_MIN_PWM_REGION) {
+ DP_ERR(p_hwfn->cdev,
+ "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+ pwm_regsize,
+ QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ /* Calculate number of DPIs */
+ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
+ ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
+ */
+ n_cpus = num_active_cpus();
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+ }
+
+ cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
+ if (cond || p_hwfn->dcbx_no_edpm) {
+ /* Either EDPM is disabled from user configuration, or it is
+ * disabled via DCBx, or it is not mandatory and we failed to
+ * allocate a WID per CPU.
+ */
+ n_cpus = 1;
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+ if (cond)
+ qed_rdma_dpm_bar(p_hwfn, p_ptt);
+ }
+
+ DP_INFO(p_hwfn,
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ norm_regsize,
+ pwm_regsize,
+ p_hwfn->dpi_size,
+ p_hwfn->dpi_count,
+ ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ "disabled" : "enabled");
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
+ p_hwfn->dpi_count,
+ p_hwfn->pf_params.rdma_pf_params.min_dpis);
+ return -EINVAL;
+ }
+
+ p_hwfn->dpi_start_offset = norm_regsize;
+
+ /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+ pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+ return 0;
+}
+
static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
@@ -840,6 +994,10 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+ rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
if (b_hw_start) {
/* enable interrupts */
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -1264,6 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ /* Each RoCE CNQ requires: 1 status block + 1 CNQ. We divide the
+ * status blocks equally between L2 / RoCE, taking into account
+ * how many L2 queues / CNQs we have
+ */
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_features++;
+
+ feat_num[QED_RDMA_CNQ] =
+ min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+ RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
+ }
+#endif
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
@@ -1304,6 +1475,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
num_funcs;
resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
+ resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB /
+ num_funcs;
for (i = 0; i < QED_MAX_RESC; i++)
resc_start[i] = resc_num[i] * enabled_func_idx;
@@ -1327,7 +1502,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
"RL = %d start = %d\n"
"MAC = %d start = %d\n"
"VLAN = %d start = %d\n"
- "ILT = %d start = %d\n",
+ "ILT = %d start = %d\n"
+ "LL2_QUEUE = %d start = %d\n",
p_hwfn->hw_info.resc_num[QED_SB],
p_hwfn->hw_info.resc_start[QED_SB],
p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
@@ -1343,7 +1519,9 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.resc_num[QED_VLAN],
p_hwfn->hw_info.resc_start[QED_VLAN],
p_hwfn->hw_info.resc_num[QED_ILT],
- p_hwfn->hw_info.resc_start[QED_ILT]);
+ p_hwfn->hw_info.resc_start[QED_ILT],
+ RESC_NUM(p_hwfn, QED_LL2_QUEUE),
+ RESC_START(p_hwfn, QED_LL2_QUEUE));
return 0;
}
@@ -2133,6 +2311,98 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
return 0;
}
+static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
+ u8 *p_filter)
+{
+ *p_high = p_filter[1] | (p_filter[0] << 8);
+ *p_low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+}
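The filter packing is easy to check by hand: for MAC aa:bb:cc:dd:ee:ff the helper yields high = 0x0000aabb (the first two octets) and low = 0xccddeeff (the remaining four). A runnable version:

    #include <stdint.h>
    #include <stdio.h>

    static void mac_to_filter(uint32_t *high, uint32_t *low,
                              const uint8_t *m)
    {
        *high = m[1] | (m[0] << 8);
        *low = m[5] | (m[4] << 8) | (m[3] << 16) | ((uint32_t)m[2] << 24);
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
        uint32_t high, low;

        mac_to_filter(&high, &low, mac);
        /* prints: high=0x0000aabb low=0xccddeeff */
        printf("high=0x%08x low=0x%08x\n", high, low);
        return 0;
    }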
+
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high = 0, low = 0, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return 0;
+
+ qed_llh_mac_to_filter(&high, &low, p_filter);
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an empty LLH filter to utilize\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "mac: %pM is added at %d\n",
+ p_filter, i);
+
+ return 0;
+}
+
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high = 0, low = 0;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ qed_llh_mac_to_filter(&high, &low, p_filter);
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (qed_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "mac: %pM is removed from %d\n",
+ p_filter, i);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 hw_addr, void *p_eth_qzone,
size_t eth_qzone_size, u8 timeset)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 343bb0344f62..b6711c106597 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -310,6 +310,26 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 *dst_id);
/**
+ * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
 * @brief Cleanup of previous driver remains prior to load
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 2777d5bb4380..72eee29c677f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -727,6 +727,9 @@ struct core_tx_bd_flags {
#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
+
};
struct core_tx_bd {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
new file mode 100644
index 000000000000..a6db10717d5c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -0,0 +1,1792 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/ipv6.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
+#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
+
+#define QED_LL2_TX_SIZE (256)
+#define QED_LL2_RX_SIZE (4096)
+
+struct qed_cb_ll2_info {
+ int rx_cnt;
+ u32 rx_size;
+ u8 handle;
+ bool frags_mapped;
+
+ /* Lock protecting LL2 buffer lists in sleepless context */
+ spinlock_t lock;
+ struct list_head list;
+
+ const struct qed_ll2_cb_ops *cbs;
+ void *cb_cookie;
+};
+
+struct qed_ll2_buffer {
+ struct list_head list;
+ void *data;
+ dma_addr_t phys_addr;
+};
+
+static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct sk_buff *skb = cookie;
+
+ /* All we need to do is release the mapping */
+ dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
+ cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
+ b_last_fragment);
+
+ if (cdev->ll2->frags_mapped)
+ /* Case where mapped frags were received, need to
+ * free skb with nr_frags marked as 0
+ */
+ skb_shinfo(skb)->nr_frags = 0;
+
+ dev_kfree_skb_any(skb);
+}
+
+static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
+ u8 **data, dma_addr_t *phys_addr)
+{
+ *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+ if (!(*data)) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
+ return -ENOMEM;
+ }
+
+ *phys_addr = dma_map_single(&cdev->pdev->dev,
+ ((*data) + NET_SKB_PAD),
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
+ DP_INFO(cdev, "Failed to map LL2 buffer data\n");
+ kfree((*data));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
+ struct qed_ll2_buffer *buffer)
+{
+ spin_lock_bh(&cdev->ll2->lock);
+
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+
+ cdev->ll2->rx_cnt--;
+ if (!cdev->ll2->rx_cnt)
+ DP_INFO(cdev, "All LL2 entries were removed\n");
+
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ return 0;
+}
+
+static void qed_ll2_kill_buffers(struct qed_dev *cdev)
+{
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
+
+ list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
+{
+ u16 packet_length = le16_to_cpu(p_cqe->packet_length);
+ struct qed_ll2_buffer *buffer = p_pkt->cookie;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u16 vlan = le16_to_cpu(p_cqe->vlan);
+ u32 opaque_data_0, opaque_data_1;
+ u8 pad = p_cqe->placement_offset;
+ dma_addr_t new_phys_addr;
+ struct sk_buff *skb;
+ bool reuse = false;
+ int rc = -EINVAL;
+ u8 *new_data;
+
+ opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
+ opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
+ "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
+ (u64)p_pkt->rx_buf_addr, pad, packet_length,
+ le16_to_cpu(p_cqe->parse_flags.flags), vlan,
+ opaque_data_0, opaque_data_1);
+
+ if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buffer->data, packet_length, false);
+ }
+
+ /* Determine if data is valid */
+ if (packet_length < ETH_HLEN)
+ reuse = true;
+
+ /* Allocate a replacement for buffer; Reuse upon failure */
+ if (!reuse)
+ rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
+ &new_phys_addr);
+
+ /* If the buffer must be reused, or no replacement was allocated, repost it */
+ if (rc)
+ goto out_post;
+
+ skb = build_skb(buffer->data, 0);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto out_post;
+ }
+
+ pad += NET_SKB_PAD;
+ skb_reserve(skb, pad);
+ skb_put(skb, packet_length);
+ skb_checksum_none_assert(skb);
+
+ /* Get partial Ethernet information instead of using
+ * eth_type_trans(), since we don't have an associated net_device.
+ */
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ /* Pass SKB onward */
+ if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
+ if (vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
+ cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
+ opaque_data_0, opaque_data_1);
+ }
+
+ /* Update Buffer information and update FW producer */
+ buffer->data = new_data;
+ buffer->phys_addr = new_phys_addr;
+
+out_post:
+ rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+
+ if (rc)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
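One detail worth spelling out in the Rx path above: qed_ll2_alloc_buffer() maps the buffer for DMA at data + NET_SKB_PAD, so the controller writes the frame NET_SKB_PAD bytes into the allocation. On completion, build_skb(buffer->data, 0) wraps the whole allocation and skb_reserve(pad + NET_SKB_PAD) advances skb->data to the actual packet start, which is why the placement offset and NET_SKB_PAD are summed. As an annotated fragment rather than a complete excerpt:

    /* allocation side (see qed_ll2_alloc_buffer() above):
     *   [ kmalloc'd start | NET_SKB_PAD | frame written by HW ... ]
     *                                   ^ DMA-mapped address
     */

    /* completion side (see qed_ll2b_complete_rx_packet() above) */
    skb = build_skb(buffer->data, 0);    /* head == kmalloc'd start */
    skb_reserve(skb, pad + NET_SKB_PAD); /* data -> packet start    */
    skb_put(skb, packet_length);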
+
+static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ bool b_lock,
+ bool b_only_active)
+{
+ struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
+
+ if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
+ return NULL;
+
+ if (!p_hwfn->p_ll2_info)
+ return NULL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ if (b_only_active) {
+ if (b_lock)
+ mutex_lock(&p_ll2_conn->mutex);
+ if (p_ll2_conn->b_active)
+ p_ret = p_ll2_conn;
+ if (b_lock)
+ mutex_unlock(&p_ll2_conn->mutex);
+ } else {
+ p_ret = p_ll2_conn;
+ }
+
+ return p_ret;
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
+ *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
+}
+
+static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ bool b_last_packet = false, b_last_frag = false;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_tx_queue *p_tx;
+ dma_addr_t tx_frag;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_tx = &p_ll2_conn->tx_queue;
+
+ while (!list_empty(&p_tx->active_descq)) {
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ b_last_packet = list_empty(&p_tx->active_descq);
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ p_tx->cur_completing_packet = *p_pkt;
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_release_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+
+ }
+}
+
+static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = p_cookie;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
+ struct qed_ll2_tx_packet *p_pkt;
+ bool b_last_frag = false;
+ unsigned long flags;
+ dma_addr_t tx_frag;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->b_completing_packet) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ new_idx = le16_to_cpu(*p_tx->p_fw_cons);
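+ /* The signed 16-bit subtraction yields the number of newly completed
+ * BDs even when the firmware consumer index wraps past 0xffff.
+ */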
+ num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+ while (num_bds) {
+ if (list_empty(&p_tx->active_descq))
+ goto out;
+
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ goto out;
+
+ p_tx->b_completing_packet = true;
+ p_tx->cur_completing_packet = *p_pkt;
+ num_bds_in_packet = p_pkt->bd_used;
+ list_del(&p_pkt->list_entry);
+
+ if (num_bds < num_bds_in_packet) {
+ DP_NOTICE(p_hwfn,
+ "Rest of BDs does not cover whole packet\n");
+ goto out;
+ }
+
+ num_bds -= num_bds_in_packet;
+ p_tx->bds_idx += num_bds_in_packet;
+ while (num_bds_in_packet--)
+ qed_chain_consume(&p_tx->txq_chain);
+
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ if (p_ll2_conn->gsi_enable)
+ qed_ll2b_complete_tx_gsi_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag, !num_bds);
+ else
+ qed_ll2b_complete_tx_packet(p_hwfn,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag, !num_bds);
+ spin_lock_irqsave(&p_tx->lock, flags);
+ }
+
+ p_tx->b_completing_packet = false;
+ rc = 0;
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
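The signed-difference trick noted above deserves a standalone check, since it is what keeps the completion loop correct across 16-bit index wrap-around:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t fw_cons = 3;       /* firmware consumer wrapped past 0 */
        uint16_t drv_cons = 0xfffe; /* driver's last-seen index */
        int16_t pending = (int16_t)fw_cons - (int16_t)drv_cons;

        printf("BDs to complete: %d\n", pending); /* 5, not -65531 */
        return 0;
    }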
+
+static int
+qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long lock_flags, bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ u16 packet_length, parse_flags, vlan;
+ u32 src_mac_addrhi;
+ u16 src_mac_addrlo;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "GSI Rx completion but active_descq is empty\n");
+ return -EIO;
+ }
+
+ list_del(&p_pkt->list_entry);
+ parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+ packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+ vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+ src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+ src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+ if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ qed_ll2b_complete_rx_gsi_packet(p_hwfn,
+ p_ll2_info->my_id,
+ p_pkt->cookie,
+ p_pkt->rx_buf_addr,
+ packet_length,
+ p_cqe->rx_cqe_gsi.data_length_error,
+ parse_flags,
+ vlan,
+ src_mac_addrhi,
+ src_mac_addrlo, b_last_cqe);
+ spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long lock_flags,
+ bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt) {
+ DP_NOTICE(p_hwfn,
+ "LL2 Rx completion but active_descq is empty\n");
+ return -EIO;
+ }
+ list_del(&p_pkt->list_entry);
+
+ if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
+ p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
+ spin_lock_irqsave(&p_rx->lock, lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = cookie;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ union core_rx_cqe_union *cqe = NULL;
+ u16 cq_new_idx = 0, cq_old_idx = 0;
+ unsigned long flags = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ while (cq_new_idx != cq_old_idx) {
+ bool b_last_cqe;
+
+ cqe = qed_chain_consume(&p_rx->rcq_chain);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ /* evaluate only after the consumer index has advanced */
+ b_last_cqe = (cq_new_idx == cq_old_idx);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
+ cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
+
+ switch (cqe->rx_cqe_sp.type) {
+ case CORE_RX_CQE_TYPE_SLOW_PATH:
+ DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
+ rc = -EINVAL;
+ break;
+ case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
+ rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
+ cqe, flags, b_last_cqe);
+ break;
+ case CORE_RX_CQE_TYPE_REGULAR:
+ rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
+ cqe, flags, b_last_cqe);
+ break;
+ default:
+ rc = -EIO;
+ }
+ }
+
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_rx_queue *p_rx;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_rx = &p_ll2_conn->rx_queue;
+
+ while (!list_empty(&p_rx->active_descq)) {
+ dma_addr_t rx_buf_addr;
+ void *cookie;
+ bool b_last;
+
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ rx_buf_addr = p_pkt->rx_buf_addr;
+ cookie = p_pkt->cookie;
+
+ b_last = list_empty(&p_rx->active_descq);
+ }
+}
+
+static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ u8 action_on_error)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct core_rx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 cqe_pbl_size;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_rx->rx_sb_index;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ DMA_REGPAIR_LE(p_ramrod->bd_base,
+ p_rx->rxq_chain.p_phys_addr);
+ cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
+ qed_chain_get_pbl_phys(&p_rx->rcq_chain));
+
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+ p_ramrod->main_func_queue = 1;
+
+ if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
+ p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
+ p_ramrod->mf_si_bcast_accept_all = 1;
+ p_ramrod->mf_si_mcast_accept_all = 1;
+ } else {
+ p_ramrod->mf_si_bcast_accept_all = 0;
+ p_ramrod->mf_si_mcast_accept_all = 0;
+ }
+
+ p_ramrod->action_on_error.error_type = action_on_error;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct core_tx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params pq_params;
+ u16 pq_id = 0, pbl_size;
+ int rc = -EINVAL;
+
+ if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_tx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_tx->tx_sb_index;
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ll2_conn->tx_stats_en = 1;
+ p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
+ p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
+
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
+ qed_chain_get_pbl_phys(&p_tx->txq_chain));
+ pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = p_ll2_conn->tx_tc;
+ pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ switch (conn_type) {
+ case QED_LL2_TYPE_ISCSI:
+ case QED_LL2_TYPE_ISCSI_OOO:
+ p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ break;
+ case QED_LL2_TYPE_ROCE:
+ p_ramrod->conn_type = PROTOCOLID_ROCE;
+ break;
+ default:
+ p_ramrod->conn_type = PROTOCOLID_ETH;
+ DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
+ }
+
+ p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct core_rx_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
+
+ p_ramrod->complete_event_flg = 1;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
+{
+ struct qed_ll2_rx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!rx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_NEXT_PTR,
+ QED_CHAIN_CNT_TYPE_U16,
+ rx_num_desc,
+ sizeof(struct core_rx_bd),
+ &p_ll2_info->rx_queue.rxq_chain);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
+ goto out;
+ }
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
+ goto out;
+ }
+ p_ll2_info->rx_queue.descq_array = p_descq;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ rx_num_desc,
+ sizeof(struct core_rx_fast_path_cqe),
+ &p_ll2_info->rx_queue.rcq_chain);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->conn_type, rx_num_desc);
+
+out:
+ return rc;
+}
+
+static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info,
+ u16 tx_num_desc)
+{
+ struct qed_ll2_tx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!tx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ tx_num_desc,
+ sizeof(struct core_tx_bd),
+ &p_ll2_info->tx_queue.txq_chain);
+ if (rc)
+ goto out;
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ p_ll2_info->tx_queue.descq_array = p_descq;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->conn_type, tx_num_desc);
+
+out:
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
+ tx_num_desc);
+ return rc;
+}
+
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_params,
+ u16 rx_num_desc,
+ u16 tx_num_desc,
+ u8 *p_connection_handle)
+{
+ qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
+ struct qed_ll2_info *p_ll2_info = NULL;
+ int rc;
+ u8 i;
+
+ if (!p_connection_handle || !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ /* Find a free connection to be used */
+	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) {
+ mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
+ if (p_hwfn->p_ll2_info[i].b_active) {
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ continue;
+ }
+
+ p_hwfn->p_ll2_info[i].b_active = true;
+ p_ll2_info = &p_hwfn->p_ll2_info[i];
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ break;
+ }
+ if (!p_ll2_info)
+ return -EBUSY;
+
+ p_ll2_info->conn_type = p_params->conn_type;
+ p_ll2_info->mtu = p_params->mtu;
+ p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
+ p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
+ p_ll2_info->tx_tc = p_params->tx_tc;
+ p_ll2_info->tx_dest = p_params->tx_dest;
+ p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
+ p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
+ p_ll2_info->gsi_enable = p_params->gsi_enable;
+
+ rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
+ if (rc)
+ goto q_allocate_fail;
+
+ rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
+ if (rc)
+ goto q_allocate_fail;
+
+ /* Register callbacks for the Rx/Tx queues */
+ comp_rx_cb = qed_ll2_rxq_completion;
+ comp_tx_cb = qed_ll2_txq_completion;
+
+ if (rx_num_desc) {
+ qed_int_register_cb(p_hwfn, comp_rx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->rx_queue.rx_sb_index,
+ &p_ll2_info->rx_queue.p_fw_cons);
+ p_ll2_info->rx_queue.b_cb_registred = true;
+ }
+
+ if (tx_num_desc) {
+ qed_int_register_cb(p_hwfn,
+ comp_tx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->tx_queue.tx_sb_index,
+ &p_ll2_info->tx_queue.p_fw_cons);
+ p_ll2_info->tx_queue.b_cb_registred = true;
+ }
+
+ *p_connection_handle = i;
+ return rc;
+
+q_allocate_fail:
+ qed_ll2_release_connection(p_hwfn, i);
+ return -ENOMEM;
+}
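+
+/* Usage sketch (illustrative only, not part of this patch): a caller
+ * pairs acquire/establish and unwinds with release on failure. 'info'
+ * is assumed to be a populated struct qed_ll2_info; the descriptor
+ * counts are arbitrary example values:
+ *
+ *	u8 handle;
+ *	int rc;
+ *
+ *	rc = qed_ll2_acquire_connection(p_hwfn, &info, 64, 64, &handle);
+ *	if (!rc) {
+ *		rc = qed_ll2_establish_connection(p_hwfn, handle);
+ *		if (rc)
+ *			qed_ll2_release_connection(p_hwfn, handle);
+ *	}
+ */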
+
+static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ u8 action_on_error = 0;
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
+
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
+ p_ll2_conn->ai_err_packet_too_big);
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+
+ return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+}
+
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ struct qed_ll2_tx_queue *p_tx;
+ int rc = -EINVAL;
+ u32 i, capacity;
+ u8 qid;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+ p_tx = &p_ll2_conn->tx_queue;
+
+ qed_chain_reset(&p_rx->rxq_chain);
+ qed_chain_reset(&p_rx->rcq_chain);
+ INIT_LIST_HEAD(&p_rx->active_descq);
+ INIT_LIST_HEAD(&p_rx->free_descq);
+ INIT_LIST_HEAD(&p_rx->posting_descq);
+ spin_lock_init(&p_rx->lock);
+ capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_rx->descq_array[i].list_entry,
+ &p_rx->free_descq);
+ *p_rx->p_fw_cons = 0;
+
+ qed_chain_reset(&p_tx->txq_chain);
+ INIT_LIST_HEAD(&p_tx->active_descq);
+ INIT_LIST_HEAD(&p_tx->free_descq);
+ INIT_LIST_HEAD(&p_tx->sending_descq);
+ spin_lock_init(&p_tx->lock);
+ capacity = qed_chain_get_capacity(&p_tx->txq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_tx->descq_array[i].list_entry,
+ &p_tx->free_descq);
+ p_tx->cur_completing_bd_idx = 0;
+ p_tx->bds_idx = 0;
+ p_tx->b_completing_packet = false;
+ p_tx->cur_send_packet = NULL;
+ p_tx->cur_send_frag_num = 0;
+ p_tx->cur_completing_frag_num = 0;
+ *p_tx->p_fw_cons = 0;
+
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+
+ qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
+ p_ll2_conn->queue_id = qid;
+ p_ll2_conn->tx_stats_id = qid;
+ p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_ll2_conn->cid,
+ DQ_DEMS_LEGACY);
+
+ rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+
+ return rc;
+}
+
+static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_rx_queue *p_rx,
+ struct qed_ll2_rx_packet *p_curp)
+{
+ struct qed_ll2_rx_packet *p_posting_packet = NULL;
+ struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
+ bool b_notify_fw = false;
+ u16 bd_prod, cq_prod;
+
+ /* This handles the flushing of already posted buffers */
+ while (!list_empty(&p_rx->posting_descq)) {
+ p_posting_packet = list_first_entry(&p_rx->posting_descq,
+ struct qed_ll2_rx_packet,
+ list_entry);
+ list_del(&p_posting_packet->list_entry);
+ list_add_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ /* This handles the supplied packet [if there is one] */
+ if (p_curp) {
+ list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ if (!b_notify_fw)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
+ cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
+ rx_prod.bd_prod = cpu_to_le16(bd_prod);
+ rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+ DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+}
+
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw)
+{
+ struct core_rx_bd_with_buff_len *p_curb = NULL;
+ struct qed_ll2_rx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ unsigned long flags;
+ void *p_data;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ if (!list_empty(&p_rx->free_descq))
+ p_curp = list_first_entry(&p_rx->free_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (p_curp) {
+ if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
+ qed_chain_get_elem_left(&p_rx->rcq_chain)) {
+ p_data = qed_chain_produce(&p_rx->rxq_chain);
+ p_curb = (struct core_rx_bd_with_buff_len *)p_data;
+ qed_chain_produce(&p_rx->rcq_chain);
+ }
+ }
+
+	/* If we're lacking entries, let's try to flush buffers to FW */
+ if (!p_curp || !p_curb) {
+ rc = -EBUSY;
+ p_curp = NULL;
+ goto out_notify;
+ }
+
+ /* We have an Rx packet we can fill */
+ DMA_REGPAIR_LE(p_curb->addr, addr);
+ p_curb->buff_length = cpu_to_le16(buf_len);
+ p_curp->rx_buf_addr = addr;
+ p_curp->cookie = cookie;
+ p_curp->rxq_bd = p_curb;
+ p_curp->buf_length = buf_len;
+ list_del(&p_curp->list_entry);
+
+ /* Check if we only want to enqueue this packet without informing FW */
+ if (!notify_fw) {
+ list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
+ goto out;
+ }
+
+out_notify:
+ qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
+out:
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
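+
+/* Posting sketch (illustrative only, not part of this patch): a caller
+ * DMA-maps a buffer and hands its bus address to the Rx queue. 'data',
+ * 'size' and 'handle' are assumed caller-owned:
+ *
+ *	dma_addr_t phys = dma_map_single(&cdev->pdev->dev, data, size,
+ *					 DMA_FROM_DEVICE);
+ *
+ *	if (!dma_mapping_error(&cdev->pdev->dev, phys))
+ *		qed_ll2_post_rx_buffer(p_hwfn, handle, phys, size, data, 1);
+ */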
+
+static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_tx_queue *p_tx,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *p_cookie,
+ u8 notify_fw)
+{
+ list_del(&p_curp->list_entry);
+ p_curp->cookie = p_cookie;
+ p_curp->bd_used = num_of_bds;
+ p_curp->notify_fw = notify_fw;
+ p_tx->cur_send_packet = p_curp;
+ p_tx->cur_send_frag_num = 0;
+
+ p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
+ p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
+ p_tx->cur_send_frag_num++;
+}
+
+static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ enum core_tx_dest tx_dest,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum core_roce_flavor_type type,
+ dma_addr_t first_frag,
+ u16 first_frag_len)
+{
+ struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
+ u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
+ struct core_tx_bd *start_bd = NULL;
+ u16 frag_idx;
+
+ start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
+ SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
+ cpu_to_le16(l4_hdr_offset_w));
+ SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+ start_bd->bd_flags.as_bitfield = bd_flags;
+ start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
+ CORE_TX_BD_FLAGS_START_BD_SHIFT;
+ SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
+ DMA_REGPAIR_LE(start_bd->addr, first_frag);
+ start_bd->nbytes = cpu_to_le16(first_frag_len);
+
+ SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
+ type);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
+ p_ll2->queue_id,
+ p_ll2->cid,
+ p_ll2->conn_type,
+ prod_idx,
+ first_frag_len,
+ num_of_bds,
+ le32_to_cpu(start_bd->addr.hi),
+ le32_to_cpu(start_bd->addr.lo));
+
+ if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
+ return;
+
+ /* Need to provide the packet with additional BDs for frags */
+ for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
+ frag_idx < num_of_bds; frag_idx++) {
+ struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
+
+ *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ (*p_bd)->bd_flags.as_bitfield = 0;
+ (*p_bd)->bitfield1 = 0;
+ (*p_bd)->bitfield0 = 0;
+ p_curp->bds_set[frag_idx].tx_frag = 0;
+ p_curp->bds_set[frag_idx].frag_len = 0;
+ }
+}
+
+/* This should be called while the Txq spinlock is held */
+static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct core_db_data db_msg = { 0, 0, 0 };
+ u16 bd_prod;
+
+ /* If there are missing BDs, don't do anything now */
+ if (p_ll2_conn->tx_queue.cur_send_frag_num !=
+ p_ll2_conn->tx_queue.cur_send_packet->bd_used)
+ return;
+
+ /* Push the current packet to the list and clean after it */
+ list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
+ &p_ll2_conn->tx_queue.sending_descq);
+ p_ll2_conn->tx_queue.cur_send_packet = NULL;
+ p_ll2_conn->tx_queue.cur_send_frag_num = 0;
+
+ /* Notify FW of packet only if requested to */
+ if (!b_notify)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
+
+ while (!list_empty(&p_tx->sending_descq)) {
+ p_pkt = list_first_entry(&p_tx->sending_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ }
+
+ SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_TX_BD_PROD_CMD);
+ db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+ db_msg.spq_prod = cpu_to_le16(bd_prod);
+
+ /* Make sure the BDs data is updated before ringing the doorbell */
+ wmb();
+
+ DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
+ p_ll2_conn->queue_id,
+ p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+}
+
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ u8 num_of_bds,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum qed_ll2_roce_flavor_type qed_roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *cookie, u8 notify_fw)
+{
+ struct qed_ll2_tx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ enum core_roce_flavor_type roce_flavor;
+ struct qed_ll2_tx_queue *p_tx;
+ struct qed_chain *p_tx_chain;
+ unsigned long flags;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_tx = &p_ll2_conn->tx_queue;
+ p_tx_chain = &p_tx->txq_chain;
+
+ if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+ return -EIO;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->cur_send_packet) {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ /* Get entry, but only if we have tx elements for it */
+ if (!list_empty(&p_tx->free_descq))
+ p_curp = list_first_entry(&p_tx->free_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
+ p_curp = NULL;
+
+ if (!p_curp) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ if (qed_roce_flavor == QED_LL2_ROCE) {
+ roce_flavor = CORE_ROCE;
+ } else if (qed_roce_flavor == QED_LL2_RROCE) {
+ roce_flavor = CORE_RROCE;
+ } else {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Prepare packet and BD, and perhaps send a doorbell to FW */
+ qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
+ num_of_bds, first_frag,
+ first_frag_len, cookie, notify_fw);
+ qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
+ num_of_bds, CORE_TX_DEST_NW,
+ vlan, bd_flags, l4_hdr_offset_w,
+ roce_flavor,
+ first_frag, first_frag_len);
+
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes)
+{
+ struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ u16 cur_send_frag_num = 0;
+ struct core_tx_bd *p_bd;
+ unsigned long flags;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+
+ if (!p_ll2_conn->tx_queue.cur_send_packet)
+ return -EINVAL;
+
+ p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
+ cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
+
+ if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+ return -EINVAL;
+
+ /* Fill the BD information, and possibly notify FW */
+ p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
+ DMA_REGPAIR_LE(p_bd->addr, addr);
+ p_bd->nbytes = cpu_to_le16(nbytes);
+ p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
+ p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
+
+ p_ll2_conn->tx_queue.cur_send_frag_num++;
+
+ spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+ spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
+
+ return 0;
+}
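+
+/* Multi-fragment Tx sketch (illustrative only, not part of this patch):
+ * the first fragment goes through qed_ll2_prepare_tx_packet and each
+ * remaining one through qed_ll2_set_fragment_of_tx_packet; the doorbell
+ * rings once the last BD is filled. The fragment addresses and lengths
+ * are assumptions:
+ *
+ *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, 2, 0, 0, 0,
+ *				       QED_LL2_ROCE, frag1, len1, cookie, 1);
+ *	if (!rc)
+ *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
+ *						       frag2, len2);
+ */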
+
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ int rc = -EINVAL;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+
+ /* Stop Tx & Rx of connection, if needed */
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+ qed_ll2_txq_flush(p_hwfn, connection_handle);
+ }
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+ qed_ll2_rxq_flush(p_hwfn, connection_handle);
+ }
+
+ return rc;
+}
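+
+/* Teardown sketch (illustrative only, not part of this patch):
+ * terminate stops the Rx/Tx queues and flushes outstanding buffers,
+ * while release frees the chains, the descriptor arrays and the CID:
+ *
+ *	qed_ll2_terminate_connection(p_hwfn, handle);
+ *	qed_ll2_release_connection(p_hwfn, handle);
+ */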
+
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->rx_queue.b_cb_registred = false;
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+ }
+
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->tx_queue.b_cb_registred = false;
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+ }
+
+ kfree(p_ll2_conn->tx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
+
+ kfree(p_ll2_conn->rx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
+
+ qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
+
+ mutex_lock(&p_ll2_conn->mutex);
+ p_ll2_conn->b_active = false;
+ mutex_unlock(&p_ll2_conn->mutex);
+}
+
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ll2_info *p_ll2_connections;
+ u8 i;
+
+ /* Allocate LL2's set struct */
+ p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
+ sizeof(struct qed_ll2_info), GFP_KERNEL);
+ if (!p_ll2_connections) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
+ return NULL;
+ }
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ p_ll2_connections[i].my_id = i;
+
+ return p_ll2_connections;
+}
+
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections)
+{
+ int i;
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ mutex_init(&p_ll2_connections[i].mutex);
+}
+
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections)
+{
+ kfree(p_ll2_connections);
+}
+
+static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_tstorm_per_queue_stat tstats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->packet_too_big_discard =
+ HILO_64_REGPAIR(tstats.packet_too_big_discard);
+ p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
+}
+
+static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_ustorm_per_queue_stat ustats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 ustats_addr;
+
+ memset(&ustats, 0, sizeof(ustats));
+ ustats_addr = BAR0_MAP_REG_USDM_RAM +
+ CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+ p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_pstorm_per_queue_stat pstats;
+ u8 stats_id = p_ll2_conn->tx_stats_id;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+}
+
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+ u8 connection_handle, struct qed_ll2_stats *p_stats)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ptt *p_ptt;
+
+ memset(p_stats, 0, sizeof(*p_stats));
+
+ if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
+ !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EINVAL;
+ }
+
+ _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+ _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+ if (p_ll2_conn->tx_stats_en)
+ _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+}
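+
+/* Stats sketch (illustrative only, not part of this patch): counters
+ * are read straight from the storm RAM, so a zeroed qed_ll2_stats is
+ * filled on each call rather than accumulated:
+ *
+ *	struct qed_ll2_stats stats;
+ *
+ *	if (!qed_ll2_get_stats(p_hwfn, handle, &stats))
+ *		DP_INFO(p_hwfn, "rx ucast pkts %llu\n", stats.rcv_ucast_pkts);
+ */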
+
+static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie)
+{
+ cdev->ll2->cbs = ops;
+ cdev->ll2->cb_cookie = cookie;
+}
+
+static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+{
+ struct qed_ll2_info ll2_info;
+ struct qed_ll2_buffer *buffer;
+ enum qed_ll2_conn_type conn_type;
+ struct qed_ptt *p_ptt;
+ int rc, i;
+
+ /* Initialize LL2 locks & lists */
+ INIT_LIST_HEAD(&cdev->ll2->list);
+ spin_lock_init(&cdev->ll2->lock);
+ cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ L1_CACHE_BYTES + params->mtu;
+ cdev->ll2->frags_mapped = params->frags_mapped;
+
+	/* Allocate memory for LL2 */
+ DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
+ cdev->ll2->rx_size);
+ for (i = 0; i < QED_LL2_RX_SIZE; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
+ goto fail;
+ }
+
+ rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
+ &buffer->phys_addr);
+ if (rc) {
+ kfree(buffer);
+ goto fail;
+ }
+
+ list_add_tail(&buffer->list, &cdev->ll2->list);
+ }
+
+ switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ conn_type = QED_LL2_TYPE_ISCSI;
+ break;
+ case QED_PCI_ETH_ROCE:
+ conn_type = QED_LL2_TYPE_ROCE;
+ break;
+ default:
+ conn_type = QED_LL2_TYPE_TEST;
+ }
+
+ /* Prepare the temporary ll2 information */
+ memset(&ll2_info, 0, sizeof(ll2_info));
+ ll2_info.conn_type = conn_type;
+ ll2_info.mtu = params->mtu;
+ ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+ ll2_info.tx_tc = 0;
+ ll2_info.tx_dest = CORE_TX_DEST_NW;
+ ll2_info.gsi_enable = 1;
+
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
+ QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
+ &cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to acquire LL2 connection\n");
+ goto fail;
+ }
+
+ rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(cdev, "Failed to establish LL2 connection\n");
+ goto release_fail;
+ }
+
+ /* Post all Rx buffers to FW */
+ spin_lock_bh(&cdev->ll2->lock);
+ list_for_each_entry(buffer, &cdev->ll2->list, list) {
+ rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+ if (rc) {
+ DP_INFO(cdev,
+ "Failed to post an Rx buffer; Deleting it\n");
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+ kfree(buffer);
+ } else {
+ cdev->ll2->rx_cnt++;
+ }
+ }
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ if (!cdev->ll2->rx_cnt) {
+ DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
+ goto release_terminate;
+ }
+
+ if (!is_valid_ether_addr(params->ll2_mac_address)) {
+ DP_INFO(cdev, "Invalid Ethernet address\n");
+ goto release_terminate;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_INFO(cdev, "Failed to acquire PTT\n");
+ goto release_terminate;
+ }
+
+ rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ params->ll2_mac_address);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ if (rc) {
+ DP_ERR(cdev, "Failed to allocate LLH filter\n");
+ goto release_terminate_all;
+ }
+
+ ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
+
+ return 0;
+
+release_terminate_all:
+
+release_terminate:
+ qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+release_fail:
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+fail:
+ qed_ll2_kill_buffers(cdev);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+ return -EINVAL;
+}
+
+static int qed_ll2_stop(struct qed_dev *cdev)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_INFO(cdev, "Failed to acquire PTT\n");
+ goto fail;
+ }
+
+ qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ cdev->ll2_mac_address);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+ eth_zero_addr(cdev->ll2_mac_address);
+
+ rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle);
+ if (rc)
+ DP_INFO(cdev, "Failed to terminate LL2 connection\n");
+
+ qed_ll2_kill_buffers(cdev);
+
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc;
+fail:
+ return -EINVAL;
+}
+
+static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
+{
+ const skb_frag_t *frag;
+ int rc = -EINVAL, i;
+ dma_addr_t mapping;
+ u16 vlan = 0;
+ u8 flags = 0;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
+		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
+ return -EINVAL;
+ }
+
+ if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
+ 1 + skb_shinfo(skb)->nr_frags);
+ return -EINVAL;
+ }
+
+ mapping = dma_map_single(&cdev->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev, "SKB mapping failed\n");
+ return -EINVAL;
+ }
+
+ /* Request HW to calculate IP csum */
+ if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
+ ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+ if (skb_vlan_tag_present(skb)) {
+ vlan = skb_vlan_tag_get(skb);
+ flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+ }
+
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ 1 + skb_shinfo(skb)->nr_frags,
+ vlan, flags, 0, 0 /* RoCE FLAVOR */,
+ mapping, skb->len, skb, 1);
+ if (rc)
+ goto err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ if (!cdev->ll2->frags_mapped) {
+ mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev,
+ mapping))) {
+ DP_NOTICE(cdev,
+ "Unable to map frag - dropping packet\n");
+ goto err;
+ }
+ } else {
+ mapping = page_to_phys(skb_frag_page(frag)) |
+ frag->page_offset;
+ }
+
+ rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle,
+ mapping,
+ skb_frag_size(frag));
+
+		/* If this fails, there is not much we can do - a partial
+		 * packet has already been posted and we can't free the
+		 * memory; wait for the completion instead.
+		 */
+ if (rc)
+ goto err2;
+ }
+
+ return 0;
+
+err:
+ dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
+
+err2:
+ return rc;
+}
+
+static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ if (!cdev->ll2)
+ return -EINVAL;
+
+ return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle, stats);
+}
+
+const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = &qed_ll2_start,
+ .stop = &qed_ll2_stop,
+ .start_xmit = &qed_ll2_start_xmit,
+ .register_cb_ops = &qed_ll2_register_cb_ops,
+ .get_stats = &qed_ll2_stats,
+};
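+
+/* Consumer sketch (illustrative only, not part of this patch; assumes
+ * the qed_ll2_if.h definitions of struct qed_ll2_params and struct
+ * qed_ll2_cb_ops):
+ *
+ *	struct qed_ll2_params params = { .mtu = 1500,
+ *					 .drop_ttl0_packets = 1,
+ *					 .rx_vlan_stripping = 1 };
+ *
+ *	ops->register_cb_ops(cdev, &my_cb_ops, my_cookie);
+ *	if (!ops->start(cdev, &params))
+ *		ops->start_xmit(cdev, skb);
+ */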
+
+int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
+ return cdev->ll2 ? 0 : -ENOMEM;
+}
+
+void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+ kfree(cdev->ll2);
+ cdev->ll2 = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
new file mode 100644
index 000000000000..80a5dc2d652d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -0,0 +1,316 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_LL2_H
+#define _QED_LL2_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
+
+enum qed_ll2_roce_flavor_type {
+ QED_LL2_ROCE,
+ QED_LL2_RROCE,
+ MAX_QED_LL2_ROCE_FLAVOR_TYPE
+};
+
+enum qed_ll2_conn_type {
+ QED_LL2_TYPE_RESERVED,
+ QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TEST,
+ QED_LL2_TYPE_ISCSI_OOO,
+ QED_LL2_TYPE_RESERVED2,
+ QED_LL2_TYPE_ROCE,
+ QED_LL2_TYPE_RESERVED3,
+ MAX_QED_LL2_RX_CONN_TYPE
+};
+
+struct qed_ll2_rx_packet {
+ struct list_head list_entry;
+ struct core_rx_bd_with_buff_len *rxq_bd;
+ dma_addr_t rx_buf_addr;
+ u16 buf_length;
+ void *cookie;
+ u8 placement_offset;
+ u16 parse_flags;
+ u16 packet_length;
+ u16 vlan;
+ u32 opaque_data[2];
+};
+
+struct qed_ll2_tx_packet {
+ struct list_head list_entry;
+ u16 bd_used;
+ u16 vlan;
+ u16 l4_hdr_offset_w;
+ u8 bd_flags;
+ bool notify_fw;
+ void *cookie;
+
+ struct {
+ struct core_tx_bd *txq_bd;
+ dma_addr_t tx_frag;
+ u16 frag_len;
+ } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
+};
+
+struct qed_ll2_rx_queue {
+ /* Lock protecting the Rx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain rxq_chain;
+ struct qed_chain rcq_chain;
+ u8 rx_sb_index;
+ bool b_cb_registred;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head posting_descq;
+ struct qed_ll2_rx_packet *descq_array;
+ void __iomem *set_prod_addr;
+};
+
+struct qed_ll2_tx_queue {
+ /* Lock protecting the Tx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain txq_chain;
+ u8 tx_sb_index;
+ bool b_cb_registred;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head sending_descq;
+ struct qed_ll2_tx_packet *descq_array;
+ struct qed_ll2_tx_packet *cur_send_packet;
+ struct qed_ll2_tx_packet cur_completing_packet;
+ u16 cur_completing_bd_idx;
+ void __iomem *doorbell_addr;
+ u16 bds_idx;
+ u16 cur_send_frag_num;
+ u16 cur_completing_frag_num;
+ bool b_completing_packet;
+};
+
+struct qed_ll2_info {
+ /* Lock protecting the state of LL2 */
+ struct mutex mutex;
+ enum qed_ll2_conn_type conn_type;
+ u32 cid;
+ u8 my_id;
+ u8 queue_id;
+ u8 tx_stats_id;
+ bool b_active;
+ u16 mtu;
+ u8 rx_drop_ttl0_flg;
+ u8 rx_vlan_removal_en;
+ u8 tx_tc;
+ enum core_tx_dest tx_dest;
+ enum core_error_handle ai_err_packet_too_big;
+ enum core_error_handle ai_err_no_buf;
+ u8 tx_stats_en;
+ struct qed_ll2_rx_queue rx_queue;
+ struct qed_ll2_tx_queue tx_queue;
+ u8 gsi_enable;
+};
+
+/**
+ * @brief qed_ll2_acquire_connection - allocates resources and
+ *        starts the Rx & Tx (if relevant) queue pair. Provides the
+ *        connection handle as an output parameter.
+ *
+ * @param p_hwfn
+ * @param p_params Contains various configuration properties
+ * @param rx_num_desc
+ * @param tx_num_desc
+ *
+ * @param p_connection_handle Output container for LL2 connection's handle
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_params,
+ u16 rx_num_desc,
+ u16 tx_num_desc,
+ u8 *p_connection_handle);
+
+/**
+ * @brief qed_ll2_establish_connection - starts a previously
+ *        allocated LL2 queue pair
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *        qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_post_rx_buffer - submits a buffer to the LL2 Rx queue.
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *        qed_ll2_acquire_connection
+ * @param addr physical address of the Rx buffer to submit
+ * @param buf_len length of the posted buffer
+ * @param cookie
+ * @param notify_fw produce the corresponding Rx BD immediately
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw);
+
+/**
+ * @brief qed_ll2_prepare_tx_packet - requests a start Tx BD
+ *        to prepare a Tx packet for submission to FW.
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *        qed_ll2_acquire_connection
+ * @param num_of_bds number of requested BDs; equals the number of
+ *        fragments in the Tx packet
+ * @param vlan VLAN to insert into the packet (if insertion is set)
+ * @param bd_flags
+ * @param l4_hdr_offset_w L4 header offset from the start of the packet
+ *        (in words). This is needed if both l4_csum
+ *        and ipv6_ext are set
+ * @param first_frag
+ * @param first_frag_len
+ * @param cookie
+ * @param notify_fw
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ u8 num_of_bds,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum qed_ll2_roce_flavor_type qed_roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len, void *cookie, u8 notify_fw);
+
+/**
+ * @brief qed_ll2_release_connection - releases resources
+ * allocated for LL2 connection
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *        qed_ll2_acquire_connection
+ */
+void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_set_fragment_of_tx_packet - provides a fragment to fill
+ *        a Tx BD of the BDs requested by
+ *        qed_ll2_prepare_tx_packet
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle
+ *        obtained from
+ *        qed_ll2_acquire_connection
+ * @param addr
+ * @param nbytes
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes);
+
+/**
+ * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle
+ * obtained from
+ *        qed_ll2_acquire_connection
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
+
+/**
+ * @brief qed_ll2_get_stats - get LL2 queue's statistics
+ *
+ * @param p_hwfn
+ * @param connection_handle LL2 connection's handle obtained from
+ *        qed_ll2_acquire_connection
+ * @param p_stats
+ *
+ * @return 0 on success, failure otherwise
+ */
+int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
+ u8 connection_handle, struct qed_ll2_stats *p_stats);
+
+/**
+ * @brief qed_ll2_alloc - Allocates LL2 connections set
+ *
+ * @param p_hwfn
+ *
+ * @return pointer to allocated qed_ll2_info or NULL
+ */
+struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ll2_setup - Inits LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections);
+
+/**
+ * @brief qed_ll2_free - Releases LL2 connections set
+ *
+ * @param p_hwfn
+ * @param p_ll2_connections
+ *
+ */
+void qed_ll2_free(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_connections);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index b730a632c383..4ee3151e80c2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -22,15 +22,22 @@
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
+#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#define QED_ROCE_QPS (8192)
+#define QED_ROCE_DPIS (8)
+#endif
+
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -204,8 +211,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
- dev_info->rdma_supported =
- (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
+ dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
+ QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -608,7 +615,16 @@ static int qed_nic_reset(struct qed_dev *cdev)
static int qed_nic_setup(struct qed_dev *cdev)
{
- int rc;
+ int rc, i;
+
+ /* Determine if interface is going to require LL2 */
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->using_ll2 = true;
+ }
+ }
rc = qed_resc_alloc(cdev);
if (rc)
@@ -666,6 +682,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ int num_l2_queues;
+#endif
int rc;
int i;
@@ -696,6 +715,31 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ num_l2_queues = 0;
+ for_each_hwfn(cdev, i)
+ num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA,
+ "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
+ cdev->int_params.fp_msix_cnt, num_l2_queues);
+
+ if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
+ cdev->int_params.rdma_msix_cnt =
+ (cdev->int_params.fp_msix_cnt - num_l2_queues)
+ / cdev->num_hwfns;
+ cdev->int_params.rdma_msix_base =
+ cdev->int_params.fp_msix_base + num_l2_queues;
+ cdev->int_params.fp_msix_cnt = num_l2_queues;
+ } else {
+ cdev->int_params.rdma_msix_cnt = 0;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
+ cdev->int_params.rdma_msix_cnt,
+ cdev->int_params.rdma_msix_base);
+#endif
+
return 0;
}
@@ -799,6 +843,13 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* divide by 3 the MRs to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+#endif
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -873,6 +924,12 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ /* Allocate LL2 interface if needed */
+ if (QED_LEADING_HWFN(cdev)->using_ll2) {
+ rc = qed_ll2_alloc_if(cdev);
+ if (rc)
+ goto err3;
+ }
if (IS_PF(cdev)) {
hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) |
@@ -893,6 +950,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return 0;
+err3:
+ qed_hw_stop(cdev);
err2:
qed_hw_timers_stop_all(cdev);
if (IS_PF(cdev))
@@ -915,6 +974,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
+ qed_ll2_dealloc_if(cdev);
+
if (IS_PF(cdev)) {
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 7d39cb9b775b..bdc9ba92f6d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1181,8 +1181,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
- for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
- val = cpu_to_be32(p_ver->name[i]);
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+ val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 759cb04e02b0..b414a0542177 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -208,6 +208,26 @@
0x50196cUL
#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
0x501964UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE \
+ 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
+ 32
+#define NIG_REG_LLH_FUNC_FILTER_EN \
+ 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE \
+ 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \
+ 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL \
+ 0x501b40UL
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \
+ 16
#define NCSI_REG_CONFIG \
0x040200UL
#define PBF_REG_INIT \
@@ -264,6 +284,8 @@
0x1f0a1cUL
#define PRS_REG_ROCE_DEST_QP_MAX_PF \
0x1f0430UL
+#define PRS_REG_USE_LIGHT_L2 \
+ 0x1f096cUL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
@@ -1426,5 +1448,11 @@
0x620000UL
#define PHY_PCIE_REG_PHY1 \
0x624000UL
-
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
new file mode 100644
index 000000000000..23430059471c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -0,0 +1,2954 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_roce.h"
+#include "qed_ll2.h"
+
+void qed_async_roce_event(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
+ p_eqe->opcode, &p_eqe->data);
+}
+
+static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+ bmap->max_count = max_count;
+
+ bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
+ GFP_KERNEL);
+ if (!bmap->bitmap) {
+ DP_NOTICE(p_hwfn,
+ "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
+ return -ENOMEM;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
+ bmap->bitmap);
+ return 0;
+}
+
+static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
+
+ *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+
+ if (*id_num >= bmap->max_count) {
+ DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
+ bmap->max_count);
+ return -EINVAL;
+ }
+
+ __set_bit(*id_num, bmap->bitmap);
+
+ return 0;
+}
+
+static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ bool b_acquired;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x\n", id_num);
+ if (id_num >= bmap->max_count)
+ return;
+
+ b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+ if (!b_acquired) {
+ DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+ return;
+ }
+}
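+
+/* Allocator sketch (illustrative only, not part of this patch): ids are
+ * handed out as the first clear bit and returned by clearing it, e.g.
+ * for the PD map allocated in qed_rdma_alloc:
+ *
+ *	u32 pd_id;
+ *
+ *	if (!qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->pd_map, &pd_id))
+ *		qed_bmap_release_id(p_hwfn, &p_rdma_info->pd_map, pd_id);
+ */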
+
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+	/* The first SB id for RoCE comes after all the L2 SBs */
+ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
+
+u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
+{
+ return QED_CAU_DEF_RX_TIMER_RES;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_info *p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+ /* Allocate a struct with current pf rdma info */
+ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+ if (!p_rdma_info) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ p_hwfn->p_rdma_info = p_rdma_info;
+ p_rdma_info->proto = PROTOCOLID_ROCE;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+
+ p_rdma_info->num_qps = num_cons / 2;
+
+ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+ /* Each MR uses a single task */
+ p_rdma_info->num_mrs = num_tasks;
+
+ /* Queue zone lines are shared between RoCE and L2 in such a way that
+ * they can be used by each without obstructing the other.
+ */
+ p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+
+ /* Allocate a struct with device params and fill it */
+ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+ if (!p_rdma_info->dev) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
+ rc);
+ goto free_rdma_info;
+ }
+
+ /* Allocate a struct with port params and fill it */
+ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+ if (!p_rdma_info->port) {
+ DP_NOTICE(p_hwfn,
+ "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
+ rc);
+ goto free_rdma_dev;
+ }
+
+ /* Allocate bit map for pd's */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate pd_map, rc = %d\n",
+ rc);
+ goto free_rdma_port;
+ }
+
+ /* Allocate DPI bitmap */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+ p_hwfn->dpi_count);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate DPI bitmap, rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
+ /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
+ * twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
+ p_rdma_info->num_qps * 2);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cq bitmap, rc = %d\n", rc);
+ goto free_dpi_map;
+ }
+
+	/* Allocate bitmap for the toggle bit of CQ icids.
+	 * We toggle the bit every time we create or resize a CQ for a given icid.
+ * The maximum number of CQs is bounded to twice the number of QPs.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+ p_rdma_info->num_qps * 2);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate toggle bits, rc = %d\n", rc);
+ goto free_cq_map;
+ }
+
+ /* Allocate bitmap for itids */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+ p_rdma_info->num_mrs);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate itids bitmaps, rc = %d\n", rc);
+ goto free_toggle_map;
+ }
+
+ /* Allocate bitmap for cids used for qps. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cid bitmap, rc = %d\n", rc);
+ goto free_tid_map;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+ return 0;
+
+free_tid_map:
+ kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+ kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+ kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+ kfree(p_rdma_info->dpi_map.bitmap);
+free_pd_map:
+ kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+ kfree(p_rdma_info->port);
+free_rdma_dev:
+ kfree(p_rdma_info->dev);
+free_rdma_info:
+ kfree(p_rdma_info);
+
+ return rc;
+}
+
+void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ kfree(p_rdma_info->cid_map.bitmap);
+ kfree(p_rdma_info->tid_map.bitmap);
+ kfree(p_rdma_info->toggle_bits.bitmap);
+ kfree(p_rdma_info->cq_map.bitmap);
+ kfree(p_rdma_info->dpi_map.bitmap);
+ kfree(p_rdma_info->pd_map.bitmap);
+
+ kfree(p_rdma_info->port);
+ kfree(p_rdma_info->dev);
+
+ kfree(p_rdma_info);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+ qed_rdma_resc_free(p_hwfn);
+}
+
+static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
+{
+ guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
+ guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
+ guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
+ guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
+ guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
+}
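+
+/* The mapping above is the standard EUI-64 derivation: flip the
+ * locally-administered bit of the MAC and splice in 0xff, 0xfe.
+ * Worked example (hypothetical MAC):
+ *
+ *	MAC  00:0e:1e:12:34:56
+ *	GUID 02:0e:1e:ff:fe:12:34:56
+ */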
+
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_events *events;
+
+ events = &p_hwfn->p_rdma_info->events;
+
+ events->unaffiliated_event = params->events->unaffiliated_event;
+ events->affiliated_event = params->events->affiliated_event;
+ events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 pci_status_control;
+ u32 num_qps;
+
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+ dev->hw_ver = 0;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+ qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+ dev->node_guid = dev->sys_image_guid;
+
+ dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+ RDMA_MAX_SGE_PER_RQ_WQE);
+
+ if (cdev->rdma_max_sge)
+ dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+ dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+ dev->max_inline = (cdev->rdma_max_inline) ?
+ min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+ dev->max_inline;
+
+ dev->max_wqe = QED_RDMA_MAX_WQE;
+ dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+ /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
+ * it is up-aligned to 16 and then to ILT page size within qed cxt.
+ * This is OK in terms of ILT but we don't want to configure the FW
+ * above its abilities
+ */
+ num_qps = ROCE_MAX_QPS;
+ num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+ dev->max_qp = num_qps;
+
+	/* CQs use the same icids as QPs, hence they are limited by the
+ * number of icids. There are two icids per QP.
+ */
+ dev->max_cq = num_qps * 2;
+
+ /* The number of mrs is smaller by 1 since the first is reserved */
+ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+ dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+	/* The maximum CQE capacity per CQ supported: the maximum number of
+	 * CQEs fits in a two-layer PBL, where 8 is the pointer size in bytes
+	 * and 32 is the size of a CQ element in bytes.
+ */
+ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+ dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+ else
+ dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+ dev->max_mw = 0;
+ dev->max_fmr = QED_RDMA_MAX_FMR;
+ dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+ dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+ dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+ dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+ dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+ p_hwfn->p_rdma_info->num_qps;
+ dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+ dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+ dev->max_pd = RDMA_MAX_PDS;
+ dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+ dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+	/* Set capabilities */
+ dev->dev_caps = 0;
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+ /* Check atomic operations support in PCI configuration space. */
+ pci_read_config_dword(cdev->pdev,
+ cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
+ &pci_status_control);
+
+ if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ port->max_msg_size = min_t(u64,
+ (dev->max_mr_mw_fmr_size *
+ p_hwfn->cdev->rdma_max_sge),
+ BIT(31));
+
+ port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ll2_ethertype_en;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
+ /* We delay writing to this reg until first cid is allocated. See
+ * qed_cxt_dynamic_ilt_alloc function for more details
+ */
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en | 0x01));
+
+ if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+ DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+ return 0;
+}
+
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct rdma_init_func_ramrod_data *p_ramrod;
+ struct qed_rdma_cnq_params *p_cnq_pbl_list;
+ struct rdma_init_func_hdr *p_params_header;
+ struct rdma_cnq_params *p_cnq_params;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 cnq_id, sb_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+ /* Save the number of cnqs for the function close ramrod */
+ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+
+ p_params_header = &p_ramrod->params_header;
+ p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+ QED_RDMA_CNQ_RAM);
+ p_params_header->num_cnqs = params->desired_cnq;
+
+ if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+ p_params_header->cq_ring_mode = 1;
+ else
+ p_params_header->cq_ring_mode = 0;
+
+ for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+ sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+ p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+ p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+ p_cnq_params->sb_num =
+ cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
+
+ p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+ p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+ DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+ p_cnq_pbl_list->pbl_ptr);
+
+ /* we assume here that cnq_id and qz_offset are the same */
+ p_cnq_params->queue_zone_num =
+ cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+ cnq_id);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ /* The first DPI is reserved for the Kernel */
+ __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
+
+ /* Tid 0 will be used as the key for "reserved MR".
+ * The driver should allocate memory for it so it can be loaded but no
+ * ramrod should be passed on it.
+ */
+ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+ if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+ DP_NOTICE(p_hwfn,
+ "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+ spin_lock_init(&p_hwfn->p_rdma_info->lock);
+
+ qed_rdma_init_devinfo(p_hwfn, params);
+ qed_rdma_init_port(p_hwfn);
+ qed_rdma_init_events(p_hwfn, params);
+
+ rc = qed_rdma_reserve_lkey(p_hwfn);
+ if (rc)
+ return rc;
+
+ rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
+int qed_rdma_stop(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_close_func_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u32 ll2_ethertype_en;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ /* Disable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en & 0xFFFE));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Stop RoCE */
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto out;
+
+ p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+ p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+ p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+ qed_rdma_free(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 dpi_start_offset;
+ u32 returned_id = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+ /* Allocate DPI */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ out_params->dpi = (u16)returned_id;
+
+ /* Calculate the corresponding DPI address */
+ dpi_start_offset = p_hwfn->dpi_start_offset;
+
+ out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size));
+
+ out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+ out_params->dpi_size = p_hwfn->dpi_size;
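+
+ /* dpi_addr is the kernel-mapped doorbell address of this DPI window,
+ * while dpi_phys_addr is the matching bus address, presumably intended
+ * for mapping the doorbell window into user space.
+ */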
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+ return rc;
+}
+
+struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+ /* Link may have changed */
+ p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+
+ return p_port;
+}
+
+struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+ /* Return struct with device parameters */
+ return p_hwfn->p_rdma_info->dev;
+}
+
+void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
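+/* Writes the caller-supplied index into the queue zone's consumer register
+ * in USTORM internal RAM (GTT-mapped); this is presumably how the driver
+ * tells FW how far it has consumed the CNQ.
+ */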
+void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 qz_num;
+ u32 addr;
+
+ p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ wmb();
+}
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ info->rdma_type = QED_RDMA_TYPE_ROCE;
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
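+/* The RDMA CNQ status blocks appear to start right after those used by the
+ * PF's L2 queues, so this returns the number of L2 status blocks to skip;
+ * in CMT mode (num_hwfns > 1) only the leading hwfn's count applies.
+ */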
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+ int feat_num;
+
+ if (cdev->num_hwfns > 1)
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
+ else
+ feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
+ cdev->num_hwfns;
+
+ return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+ int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
+ int n_msix = cdev->int_params.rdma_msix_cnt;
+
+ return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+ DP_ERR(cdev,
+ "qed roce supports only MSI-X interrupts (detected %d).\n",
+ cdev->int_params.out.int_mode);
+ return -EINVAL;
+ } else if (cdev->int_params.fp_msix_cnt) {
+ limit = cdev->int_params.rdma_msix_cnt;
+ }
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.rdma_msix_base;
+
+ info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+ info->msix_cnt, msix_base);
+ }
+
+ return 0;
+}
+
+int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+ /* Allocates an unused protection domain */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->pd_map, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ *pd = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+ return rc;
+}
+
+void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+ /* Returns a previously allocated protection domain for reuse */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ enum qed_rdma_toggle_bit toggle_bit;
+ u32 bmap_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+ /* the function toggle the bit that is related to a given icid
+ * and returns the new toggle bit's value
+ */
+ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+ spin_lock_bh(&p_info->lock);
+ toggle_bit = !test_and_change_bit(bmap_id,
+ p_info->toggle_bits.bitmap);
+ spin_unlock_bh(&p_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+ toggle_bit);
+
+ return toggle_bit;
+}
+
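+/* The toggle bit lets FW tell fresh CQEs from stale ring contents when a
+ * CQ is created or resized on a previously used icid; presumably CQEs
+ * produced after the ramrod carry the flipped bit value.
+ */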
+int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params, u16 *icid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ struct rdma_create_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 returned_id, start_cid;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+ params->cq_handle_hi, params->cq_handle_lo);
+
+ /* Allocate icid */
+ spin_lock_bh(&p_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_info->cq_map, &returned_id);
+ spin_unlock_bh(&p_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+ return rc;
+ }
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_info->proto);
+ *icid = returned_id + start_cid;
+
+ /* Check if icid requires a page allocation */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+ if (rc)
+ goto err;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = *icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send create CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_CQ,
+ p_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+ p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+ p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+ p_ramrod->dpi = cpu_to_le16(params->dpi);
+ p_ramrod->is_two_level_pbl = params->pbl_two_level;
+ p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+ p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+ p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+ params->cnq_id;
+ p_ramrod->int_timeout = params->int_timeout;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+ p_ramrod->toggle_bit = toggle_bit;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ /* restore toggle bit */
+ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ /* release allocated icid */
+ qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_rdma_resize_cq(void *rdma_cxt,
+ struct qed_rdma_resize_cq_in_params *in_params,
+ struct qed_rdma_resize_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_resize_cq_output_params *p_ramrod_res;
+ struct rdma_resize_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ u8 fw_return_code;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ (struct rdma_resize_cq_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed resize cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_RESIZE_CQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_resize_cq;
+
+ p_ramrod->flags = 0;
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
+ in_params->icid);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
+ in_params->pbl_two_level);
+
+ p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
+ p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
+ p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ goto err;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
+ out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
+
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_resize_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+ DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_destroy_cq_output_params *p_ramrod_res;
+ struct rdma_destroy_cq_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ (struct rdma_destroy_cq_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send destroy CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_CQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ /* Free icid */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn,
+ &p_hwfn->p_rdma_info->cq_map,
+ (in_params->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->
+ p_rdma_info->proto)));
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
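+/* FW expects the MAC as three 16-bit words, each packing a byte pair in
+ * network order before the little-endian conversion; e.g.
+ * aa:bb:cc:dd:ee:ff becomes { 0xaabb, 0xccdd, 0xeeff }.
+ */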
+static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+{
+ p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+ p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+ p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
+
+static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
+ __le32 *dst_gid)
+{
+ u32 i;
+
+ if (qp->roce_mode == ROCE_V2_IPV4) {
+ /* The IPv4 addresses shall be aligned to the highest word.
+ * The lower words must be zero.
+ */
+ memset(src_gid, 0, sizeof(union qed_gid));
+ memset(dst_gid, 0, sizeof(union qed_gid));
+ src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
+ dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
+ } else {
+ /* GIDs and IPv6 addresses coincide in location and size */
+ for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
+ src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
+ dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
+ }
+ }
+}
+
+static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
+{
+ enum roce_flavor flavor;
+
+ switch (roce_mode) {
+ case ROCE_V1:
+ flavor = PLAIN_ROCE;
+ break;
+ case ROCE_V2_IPV4:
+ flavor = RROCE_IPV4;
+ break;
+ case ROCE_V2_IPV6:
+ flavor = RROCE_IPV6;
+ break;
+ default:
+ flavor = MAX_ROCE_FLAVOR;
+ break;
+ }
+ return flavor;
+}
+
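+/* Each QP consumes two adjacent icids - an even one for the responder and
+ * the following odd one for the requester - which is why qed_rdma_init_hw()
+ * insists that the first RoCE cid be even.
+ */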
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 responder_icid;
+ u32 requester_icid;
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &responder_icid);
+ if (rc) {
+ spin_unlock_bh(&p_rdma_info->lock);
+ return rc;
+ }
+
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ if (rc)
+ goto err;
+
+ /* the two icid's should be adjacent */
+ if ((requester_icid - responder_icid) != 1) {
+ DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+ requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+
+ /* If these icids require a new ILT line allocate DMA-able context for
+ * an ILT page
+ */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
+ if (rc)
+ goto err;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
+ if (rc)
+ goto err;
+
+ *cid = (u16)responder_icid;
+ return rc;
+
+err:
+ spin_lock_bh(&p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Allocate CID - failed, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params qm_params;
+ enum roce_flavor roce_flavor;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue0 = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for IRQ */
+ qp->irq_num_pages = 1;
+ qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->irq_phys_addr, GFP_KERNEL);
+ if (!qp->irq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
+
+ p_ramrod->flags = 0;
+
+ roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->irq_num_pages = qp->irq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+ p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+ p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->rq_cq_id);
+
+ memset(&qm_params, 0, sizeof(qm_params));
+ qm_params.roce.qpid = qp->icid >> 1;
+ physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+
+ p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = qp->udp_src_port;
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
+
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
+ rc, physical_queue0);
+
+ if (rc)
+ goto err;
+
+ qp->resp_offloaded = true;
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ return rc;
+}
+
+static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ union qed_qm_pq_params qm_params;
+ enum roce_flavor roce_flavor;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue0 = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for ORQ */
+ qp->orq_num_pages = 1;
+ qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->orq_phys_addr, GFP_KERNEL);
+ if (!qp->orq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_req;
+
+ p_ramrod->flags = 0;
+
+ roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->orq_num_pages = qp->orq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
+ p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
+ p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
+ p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+ p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->sq_cq_id);
+
+ memset(&qm_params, 0, sizeof(qm_params));
+ qm_params.roce.qpid = qp->icid >> 1;
+ physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+
+ p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = qp->udp_src_port;
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+
+ if (rc)
+ goto err;
+
+ qp->req_offloaded = true;
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+ return rc;
+}
+
+static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
+
+ p_ramrod->flags = 0;
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_sqd,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !(qp->req_offloaded))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
+
+ p_ramrod->flags = 0;
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
+ qp->sqd_async);
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
+
+ SET_FIELD(p_ramrod->flags,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *num_invalidated_mw)
+{
+ struct roce_destroy_qp_resp_output_params *p_ramrod_res;
+ struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
+
+ p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+
+ if (!p_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+
+ /* Free the IRQ only if the ramrod succeeded, since otherwise FW may
+ * still be using it
+ */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ qp->resp_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct roce_destroy_qp_resp_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *num_bound_mw)
+{
+ struct roce_destroy_qp_req_output_params *p_ramrod_res;
+ struct roce_destroy_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->req_offloaded)
+ return 0;
+
+ p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy requester failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
+
+ /* Free the ORQ only if the ramrod succeeded, since otherwise FW may
+ * still be using it
+ */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+
+ qp->req_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
+ struct roce_query_qp_req_output_params *p_req_ramrod_res;
+ struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
+ struct roce_query_qp_req_ramrod_data *p_req_ramrod;
+ struct qed_sp_init_data init_data;
+ dma_addr_t resp_ramrod_res_phys;
+ dma_addr_t req_ramrod_res_phys;
+ struct qed_spq_entry *p_ent;
+ bool rq_err_state;
+ bool sq_err_state;
+ bool sq_draining;
+ int rc = -ENOMEM;
+
+ if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
+ /* We can't send ramrod to the fw since this qp wasn't offloaded
+ * to the fw yet
+ */
+ out_params->draining = false;
+ out_params->rq_psn = qp->rq_psn;
+ out_params->sq_psn = qp->sq_psn;
+ out_params->state = qp->cur_state;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
+ return 0;
+ }
+
+ if (!(qp->resp_offloaded)) {
+ DP_NOTICE(p_hwfn,
+ "The responder's qp should be offloded before requester's\n");
+ return -EINVAL;
+ }
+
+ /* Send a query responder ramrod to FW to get RQ-PSN and state */
+ p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_resp_ramrod_res),
+ &resp_ramrod_res_phys, GFP_KERNEL);
+ if (!p_resp_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_resp;
+
+ p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
+ DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_resp;
+
+ /* Read the output params before freeing their DMA buffer */
+ out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+ ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+
+ if (!(qp->req_offloaded)) {
+ /* Don't send query qp for the requester */
+ out_params->sq_psn = qp->sq_psn;
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+
+ out_params->state = qp->cur_state;
+
+ return 0;
+ }
+
+ /* Send a query requester ramrod to FW to get SQ-PSN and state */
+ p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_req_ramrod_res),
+ &req_ramrod_res_phys,
+ GFP_KERNEL);
+ if (!p_req_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ init_data.cid = qp->icid + 1;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_req;
+
+ p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
+ DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_req;
+
+ /* Read the output params before freeing their DMA buffer */
+ out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
+ sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
+ sq_draining =
+ GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+ else if (sq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_SQE;
+ else if (sq_draining)
+ out_params->draining = true;
+ out_params->state = qp->cur_state;
+
+ return 0;
+
+err_req:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+ return rc;
+err_resp:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+ return rc;
+}
+
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u32 num_invalidated_mw = 0;
+ u32 num_bound_mw = 0;
+ u32 start_cid;
+ int rc;
+
+ /* Destroys the specified QP */
+ if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
+ DP_NOTICE(p_hwfn,
+ "QP must be in error, reset or init state before destroying it\n");
+ return -EINVAL;
+ }
+
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
+ if (rc)
+ return rc;
+
+ /* Send destroy requester ramrod */
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
+ if (rc)
+ return rc;
+
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+
+ /* Release responder's icid */
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
+ qp->icid - start_cid);
+
+ /* Release requester's icid */
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
+ qp->icid + 1 - start_cid);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ return 0;
+}
+
+int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* The following fields are filled in from qp and not FW as they can't
+ * be modified by FW
+ */
+ out_params->mtu = qp->mtu;
+ out_params->dest_qp = qp->dest_qp;
+ out_params->incoming_atomic_en = qp->incoming_atomic_en;
+ out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+ out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+ out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+ out_params->dgid = qp->dgid;
+ out_params->flow_label = qp->flow_label;
+ out_params->hop_limit_ttl = qp->hop_limit_ttl;
+ out_params->traffic_class_tos = qp->traffic_class_tos;
+ out_params->timeout = qp->ack_timeout;
+ out_params->rnr_retry = qp->rnr_retry_cnt;
+ out_params->retry_cnt = qp->retry_cnt;
+ out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+ out_params->pkey_index = 0;
+ out_params->max_rd_atomic = qp->max_rd_atomic_req;
+ out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+ out_params->sqd_async = qp->sqd_async;
+
+ rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+ /* free qp params struct */
+ kfree(qp);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+ return rc;
+}
+
+struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *in_params,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_qp *qp;
+ u8 max_stats_queues;
+ int rc;
+
+ if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+ DP_ERR(p_hwfn->cdev,
+ "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ rdma_cxt, in_params, out_params);
+ return NULL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "qed rdma create qp called with qp_handle = %08x%08x\n",
+ in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+ /* Some sanity checks... */
+ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+ if (in_params->stats_queue >= max_stats_queues) {
+ DP_ERR(p_hwfn->cdev,
+ "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+ in_params->stats_queue, max_stats_queues);
+ return NULL;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
+ return NULL;
+ }
+
+ rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+ if (rc) {
+ kfree(qp);
+ return NULL;
+ }
+
+ /* Only use the icid once allocation has succeeded */
+ qp->qpid = ((0xFF << 16) | qp->icid);
+
+ DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
+
+ qp->cur_state = QED_ROCE_QP_STATE_RESET;
+ qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+ qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+ qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+ qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+ qp->use_srq = in_params->use_srq;
+ qp->signal_all = in_params->signal_all;
+ qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+ qp->pd = in_params->pd;
+ qp->dpi = in_params->dpi;
+ qp->sq_cq_id = in_params->sq_cq_id;
+ qp->sq_num_pages = in_params->sq_num_pages;
+ qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+ qp->rq_cq_id = in_params->rq_cq_id;
+ qp->rq_num_pages = in_params->rq_num_pages;
+ qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+ qp->srq_id = in_params->srq_id;
+ qp->req_offloaded = false;
+ qp->resp_offloaded = false;
+ qp->e2e_flow_control_en = qp->use_srq ? false : true;
+ qp->stats_queue = in_params->stats_queue;
+
+ out_params->icid = qp->icid;
+ out_params->qp_id = qp->qpid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+ return qp;
+}
+
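+/* A rough map of the transitions handled below:
+ *   INIT/RESET -> RTR          : create responder
+ *   RTR -> RTS                 : create requester, then modify responder
+ *   RTS -> RTS, SQD -> SQD/RTS : modify responder and requester
+ *   RTS -> SQD                 : modify requester (move_to_sqd)
+ *   any -> ERR/SQE             : modify both with move_to_err set
+ *   any -> RESET               : destroy responder and requester objects
+ */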
+static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ u32 num_invalidated_mw = 0, num_bound_mw = 0;
+ int rc = 0;
+
+ /* Perform additional operations according to the current state and the
+ * next state
+ */
+ if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
+ (prev_state == QED_ROCE_QP_STATE_RESET)) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
+ /* Init->RTR or Reset->RTR */
+ rc = qed_roce_sp_create_responder(p_hwfn, qp);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTR-> RTS */
+ rc = qed_roce_sp_create_requester(p_hwfn, qp);
+ if (rc)
+ return rc;
+
+ /* Send modify responder ramrod */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTS->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* RTS->SQD */
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* SQD->SQD */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* SQD->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
+ qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+ /* ->ERR */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
+ params->modify_flags);
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
+ /* Any state -> RESET */
+
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
+ &num_invalidated_mw);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
+ &num_bound_mw);
+
+ if (num_invalidated_mw != num_bound_mw) {
+ DP_NOTICE(p_hwfn,
+ "number of invalidate memory windows is different from bounded ones\n");
+ return -EINVAL;
+ }
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ }
+
+ return rc;
+}
+
+int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ enum qed_roce_qp_state prev_state;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+ qp->icid, params->new_state);
+
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+ qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+ qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+ qp->incoming_atomic_en = params->incoming_atomic_en;
+ }
+
+ /* Update QP structure with the updated values */
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+ qp->roce_mode = params->roce_mode;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+ qp->pkey = params->pkey;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+ qp->e2e_flow_control_en = params->e2e_flow_control_en;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+ qp->dest_qp = params->dest_qp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+ /* Indicates that the following parameters have changed:
+ * Traffic class, flow label, hop limit, source GID,
+ * destination GID, loopback indicator
+ */
+ qp->traffic_class_tos = params->traffic_class_tos;
+ qp->flow_label = params->flow_label;
+ qp->hop_limit_ttl = params->hop_limit_ttl;
+
+ qp->sgid = params->sgid;
+ qp->dgid = params->dgid;
+ qp->udp_src_port = 0;
+ qp->vlan_id = params->vlan_id;
+ qp->mtu = params->mtu;
+ qp->lb_indication = params->lb_indication;
+ memcpy((u8 *)&qp->remote_mac_addr[0],
+ (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+ if (params->use_local_mac) {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+ } else {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+ }
+ }
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+ qp->rq_psn = params->rq_psn;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+ qp->sq_psn = params->sq_psn;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+ qp->max_rd_atomic_req = params->max_rd_atomic_req;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+ qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+ qp->ack_timeout = params->ack_timeout;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+ qp->retry_cnt = params->retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+ qp->rnr_retry_cnt = params->rnr_retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+ qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+ qp->sqd_async = params->sqd_async;
+
+ prev_state = qp->cur_state;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+ qp->cur_state = params->new_state;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+ qp->cur_state);
+ }
+
+ rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_register_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum rdma_tid_type tid_type;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (p_hwfn->p_rdma_info->last_tid < params->itid)
+ p_hwfn->p_rdma_info->last_tid = params->itid;
+
+ p_ramrod = &p_ent->ramrod.rdma_register_tid;
+
+ p_ramrod->flags = 0;
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+ params->pbl_two_level);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
+ /* Don't initialize D/C field, as it may override other bits. */
+ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+ params->page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
+ p_hwfn->p_rdma_info->last_tid);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+ params->remote_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+ params->remote_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+ params->remote_atomic);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+ params->local_write);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+
+ SET_FIELD(p_ramrod->flags,
+ RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+ params->mw_bind);
+
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+ params->pbl_page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+
+ switch (params->tid_type) {
+ case QED_RDMA_TID_REGISTERED_MR:
+ tid_type = RDMA_TID_REGISTERED_MR;
+ break;
+ case QED_RDMA_TID_FMR:
+ tid_type = RDMA_TID_FMR;
+ break;
+ case QED_RDMA_TID_MW_TYPE1:
+ tid_type = RDMA_TID_MW_TYPE1;
+ break;
+ case QED_RDMA_TID_MW_TYPE2A:
+ tid_type = RDMA_TID_MW_TYPE2A;
+ break;
+ default:
+ rc = -EINVAL;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+ p_ramrod->itid = cpu_to_le32(params->itid);
+ p_ramrod->key = params->key;
+ p_ramrod->pd = cpu_to_le16(params->pd);
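+ /* The MR length is split: bits 39:32 in length_hi, bits 31:0 in length_lo */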
+ p_ramrod->length_hi = (u8)(params->length >> 32);
+ p_ramrod->length_lo = DMA_LO_LE(params->length);
+ if (params->zbva) {
+ /* Lower 32 bits of the registered MR address.
+ * In case of zero based MR, will hold FBO
+ */
+ p_ramrod->va.hi = 0;
+ p_ramrod->va.lo = cpu_to_le32(params->fbo);
+ } else {
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+ }
+ DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+ /* DIF */
+ if (params->dif_enabled) {
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+ DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+ params->dif_error_addr);
+ DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+ return rc;
+}
+
+int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_deregister_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+ p_ramrod->itid = cpu_to_le32(itid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+ /* Bit indicating that the TID is in use and a nig drain is
+ * required before sending the ramrod again
+ */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = -EBUSY;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Drain failed\n");
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Resend the ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto,
+ &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to init sp-element\n");
+ return rc;
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Ramrod failed\n");
+ return rc;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+ fw_return_code);
+ return -EINVAL;
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+ return rc;
+}
+
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+ return QED_LEADING_HWFN(cdev);
+}
+
+static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 val;
+
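+ /* EDPM is allowed only if neither DCBX nor the doorbell BAR forbids it */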
+ val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+ DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+ "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+ val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
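+ /* The doorbell BAR cannot support EDPM; record that and refresh DPM_EN */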
+ p_hwfn->db_bar_no_edpm = true;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
+int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_ptt *p_ptt;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "desired_cnq = %08x\n", params->desired_cnq);
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ goto err;
+
+ rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err1;
+
+ rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err2;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+
+err2:
+ qed_rdma_free(p_hwfn);
+err1:
+ qed_ptt_release(p_hwfn, p_ptt);
+err:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+ struct qed_rdma_start_in_params *params)
+{
+ return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
+}
+
+void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_roce_ll2_packet *packet = cookie;
+ struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+
+ roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
+}
+
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
+ cookie, first_frag_addr,
+ b_last_fragment, b_last_packet);
+}
+
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet)
+{
+ struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
+ struct qed_roce_ll2_rx_params params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_roce_ll2_packet pkt;
+
+ DP_VERBOSE(cdev,
+ QED_MSG_LL2,
+ "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
+ (void *)(uintptr_t)rx_buf_addr,
+ data_length, data_length_error);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.n_seg = 1;
+ pkt.payload[0].baddr = rx_buf_addr;
+ pkt.payload[0].len = data_length;
+
+ memset(&params, 0, sizeof(params));
+ params.vlan_id = vlan;
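+ /* Reassemble the source MAC address from the big-endian hi/lo words */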
+ *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
+ *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
+
+ if (data_length_error) {
+ DP_ERR(cdev,
+ "roce ll2 rx complete: data length error %d, length=%d\n",
+ data_length_error, data_length);
+ params.rc = -EINVAL;
+ }
+
+ roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
+ DP_ERR(cdev,
+ "qed roce mac filter failed - roce_info/ll2 NULL\n");
+ return -EINVAL;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt) {
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to acquire PTT\n");
+ return -EINVAL;
+ }
+
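+ /* Serialize filter updates so the remove+add pair is atomic */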
+ mutex_lock(&hwfn->ll2->lock);
+ if (old_mac_address)
+ qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ old_mac_address);
+ if (new_mac_address)
+ rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
+ new_mac_address);
+ mutex_unlock(&hwfn->ll2->lock);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
+
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to add mac filter\n");
+
+ return rc;
+}
+
+static int qed_roce_ll2_start(struct qed_dev *cdev,
+ struct qed_roce_ll2_params *params)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2;
+ struct qed_ll2_info ll2_params;
+ int rc;
+
+ if (!params) {
+ DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
+ return -EINVAL;
+ }
+ if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
+ params->cbs.tx_cb, params->cbs.rx_cb);
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(params->mac_address)) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
+ params->mac_address);
+ return -EINVAL;
+ }
+
+ /* Initialize */
+ roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
+ if (!roce_ll2) {
+ DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
+ return -ENOMEM;
+ }
+ roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+ roce_ll2->cbs = params->cbs;
+ roce_ll2->cb_cookie = params->cb_cookie;
+ mutex_init(&roce_ll2->lock);
+
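+ /* Describe the LL2 connection that will carry the RoCE GSI/CM traffic */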
+ memset(&ll2_params, 0, sizeof(ll2_params));
+ ll2_params.conn_type = QED_LL2_TYPE_ROCE;
+ ll2_params.mtu = params->mtu;
+ ll2_params.rx_drop_ttl0_flg = true;
+ ll2_params.rx_vlan_removal_en = false;
+ ll2_params.tx_dest = CORE_TX_DEST_NW;
+ ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
+ ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
+ ll2_params.gsi_enable = true;
+
+ rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
+ params->max_rx_buffers,
+ params->max_tx_buffers,
+ &roce_ll2->handle);
+ if (rc) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle);
+ if (rc) {
+ DP_ERR(cdev,
+ "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
+ rc);
+ goto err1;
+ }
+
+ hwfn->ll2 = roce_ll2;
+
+ rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
+ if (rc) {
+ hwfn->ll2 = NULL;
+ goto err2;
+ }
+ ether_addr_copy(roce_ll2->mac_address, params->mac_address);
+
+ return 0;
+
+err2:
+ qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err1:
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+err:
+ kfree(roce_ll2);
+ return rc;
+}
+
+static int qed_roce_ll2_stop(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_roce_ll2_info *roce_ll2;
+ int rc;
+
+ if (!cdev) {
+ pr_err("qed roce ll2 stop: invalid cdev\n");
+ return -EINVAL;
+ }
+
+ hwfn = QED_LEADING_HWFN(cdev);
+ roce_ll2 = hwfn->ll2;
+
+ if (!roce_ll2 || roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
+ DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
+ return -EINVAL;
+ }
+
+ /* remove LL2 MAC address filter */
+ rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
+ eth_zero_addr(roce_ll2->mac_address);
+
+ rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle);
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
+ rc);
+
+ qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
+
+ roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ kfree(roce_ll2);
+
+ return rc;
+}
+
+static int qed_roce_ll2_tx(struct qed_dev *cdev,
+ struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_tx_params *params)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_roce_ll2_info *roce_ll2;
+ enum qed_ll2_roce_flavor_type qed_roce_flavor;
+ u8 flags = 0;
+ int rc;
+ int i;
+
+ if (!cdev || !pkt || !params) {
+ pr_err("roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
+ cdev, pkt, params);
+ return -EINVAL;
+ }
+
+ hwfn = QED_LEADING_HWFN(cdev);
+ roce_ll2 = hwfn->ll2;
+
+ qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
+ : QED_LL2_RROCE;
+
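+ /* RoCE v2 over IPv4 includes an IP header, so request IP checksum offload */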
+ if (pkt->roce_mode == ROCE_V2_IPV4)
+ flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+
+ /* Tx header */
+ rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
+ 1 + pkt->n_seg, 0, flags, 0,
+ qed_roce_flavor, pkt->header.baddr,
+ pkt->header.len, pkt, 1);
+ if (rc) {
+ DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+ return QED_ROCE_TX_HEAD_FAILURE;
+ }
+
+ /* Tx payload */
+ for (i = 0; i < pkt->n_seg; i++) {
+ rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle,
+ pkt->payload[i].baddr,
+ pkt->payload[i].len);
+ if (rc) {
+ /* Not much can be done here: a partial packet has
+ * already been posted and we can't free the memory,
+ * so wait for the completion instead.
+ */
+ DP_ERR(cdev,
+ "roce ll2 tx: payload failed (rc=%d)\n", rc);
+ return QED_ROCE_TX_FRAG_FAILURE;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
+ struct qed_roce_ll2_buffer *buf,
+ u64 cookie, u8 notify_fw)
+{
+ return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->ll2->handle,
+ buf->baddr, buf->len,
+ (void *)(uintptr_t)cookie, notify_fw);
+}
+
+static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
+
+ return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ roce_ll2->handle, stats);
+}
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_rdma_dev_info,
+ .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+ .rdma_init = &qed_rdma_init,
+ .rdma_add_user = &qed_rdma_add_user,
+ .rdma_remove_user = &qed_rdma_remove_user,
+ .rdma_stop = &qed_rdma_stop,
+ .rdma_query_port = &qed_rdma_query_port,
+ .rdma_query_device = &qed_rdma_query_device,
+ .rdma_get_start_sb = &qed_rdma_get_sb_start,
+ .rdma_get_rdma_int = &qed_rdma_get_int,
+ .rdma_set_rdma_int = &qed_rdma_set_int,
+ .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+ .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+ .rdma_alloc_pd = &qed_rdma_alloc_pd,
+ .rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_create_cq = &qed_rdma_create_cq,
+ .rdma_destroy_cq = &qed_rdma_destroy_cq,
+ .rdma_create_qp = &qed_rdma_create_qp,
+ .rdma_modify_qp = &qed_rdma_modify_qp,
+ .rdma_query_qp = &qed_rdma_query_qp,
+ .rdma_destroy_qp = &qed_rdma_destroy_qp,
+ .rdma_alloc_tid = &qed_rdma_alloc_tid,
+ .rdma_free_tid = &qed_rdma_free_tid,
+ .rdma_register_tid = &qed_rdma_register_tid,
+ .rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .roce_ll2_start = &qed_roce_ll2_start,
+ .roce_ll2_stop = &qed_roce_ll2_stop,
+ .roce_ll2_tx = &qed_roce_ll2_tx,
+ .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
+ .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+ .roce_ll2_stats = &qed_roce_ll2_stats,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+ return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
new file mode 100644
index 000000000000..2f091e8a0f40
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -0,0 +1,216 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_H
+#define _QED_ROCE_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_ll2.h"
+
+#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
+#define QED_RDMA_MAX_P_KEY (1)
+#define QED_RDMA_MAX_WQE (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY (15)
+#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
+#define QED_RDMA_MAX_SRQS (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+
+enum qed_rdma_toggle_bit {
+ QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+ QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+struct qed_bmap {
+ unsigned long *bitmap;
+ u32 max_count;
+};
+
+struct qed_rdma_info {
+ /* spin lock to protect bitmaps */
+ spinlock_t lock;
+
+ struct qed_bmap cq_map;
+ struct qed_bmap pd_map;
+ struct qed_bmap tid_map;
+ struct qed_bmap qp_map;
+ struct qed_bmap srq_map;
+ struct qed_bmap cid_map;
+ struct qed_bmap dpi_map;
+ struct qed_bmap toggle_bits;
+ struct qed_rdma_events events;
+ struct qed_rdma_device *dev;
+ struct qed_rdma_port *port;
+ u32 last_tid;
+ u8 num_cnqs;
+ u32 num_qps;
+ u32 num_mrs;
+ u16 queue_zone_base;
+ enum protocol_type proto;
+};
+
+struct qed_rdma_resize_cq_in_params {
+ u16 icid;
+ u32 cq_size;
+ bool pbl_two_level;
+ u64 pbl_ptr;
+ u16 pbl_num_pages;
+ u8 pbl_page_size_log;
+};
+
+struct qed_rdma_resize_cq_out_params {
+ u32 prod;
+ u32 cons;
+};
+
+struct qed_rdma_resize_cnq_in_params {
+ u32 cnq_id;
+ u32 pbl_page_size_log;
+ u64 pbl_ptr;
+};
+
+struct qed_rdma_qp {
+ struct regpair qp_handle;
+ struct regpair qp_handle_async;
+ u32 qpid;
+ u16 icid;
+ enum qed_roce_qp_state cur_state;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+
+ u16 pd;
+ u16 pkey;
+ u32 dest_qp;
+ u16 mtu;
+ u16 srq_id;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u16 dpi;
+ u32 flow_label;
+ bool lb_indication;
+ u16 vlan_id;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ enum roce_mode roce_mode;
+ u16 udp_src_port;
+ u8 stats_queue;
+
+ /* requester */
+ u8 max_rd_atomic_req;
+ u32 sq_psn;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ dma_addr_t sq_pbl_ptr;
+ void *orq;
+ dma_addr_t orq_phys_addr;
+ u8 orq_num_pages;
+ bool req_offloaded;
+
+ /* responder */
+ u8 max_rd_atomic_resp;
+ u32 rq_psn;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ dma_addr_t rq_pbl_ptr;
+ void *irq;
+ dma_addr_t irq_phys_addr;
+ u8 irq_num_pages;
+ bool resp_offloaded;
+
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+
+ void *shared_queue;
+ dma_addr_t shared_queue_phys_addr;
+};
+
+int
+qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params);
+int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
+int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
+int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
+void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
+struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
+struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
+int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params);
+void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
+int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
+int qed_rdma_stop(void *rdma_cxt);
+u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
+u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
+void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
+void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+void qed_async_roce_event(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe);
+int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
+int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params);
+int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+#endif
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index a548504c3420..652c90819758 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -61,6 +61,10 @@ union ramrod_data {
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
struct vport_filter_update_ramrod_data vport_filter_update;
struct rdma_init_func_ramrod_data rdma_init_func;
@@ -81,6 +85,7 @@ union ramrod_data {
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+ struct roce_init_func_ramrod_data roce_init_func;
struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 349af182d085..caff41544898 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,6 +28,9 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#include "qed_roce.h"
+#endif
/***************************************************************************
* Structures & Definitions
@@ -237,6 +240,11 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+ case PROTOCOLID_ROCE:
+ qed_async_roce_event(p_hwfn, p_eqe);
+ return 0;
+#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index a4a3cead15bb..d2d6621fe0e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1851,8 +1851,8 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
u16 qid = mbx->req_virt->start_txq.tx_qid;
- p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
+ p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
}
qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 85334ceaf69c..abf5bf11f865 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -544,7 +544,7 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
u32 db_addr;
- db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY);
+ db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
db_addr;
}
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 74a49850d74d..28dc58919c85 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
+qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index e01adce4a966..28c0e9f42c9e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -106,6 +106,13 @@ struct qede_vlan {
bool configured;
};
+struct qede_rdma_dev {
+ struct qedr_dev *qedr_dev;
+ struct list_head entry;
+ struct list_head roce_event_list;
+ struct workqueue_struct *roce_wq;
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -185,6 +192,8 @@ struct qede_dev {
unsigned long sp_flags;
u16 vxlan_dst_port;
u16 geneve_dst_port;
+
+ struct qede_rdma_dev rdma_info;
};
enum QEDE_STATE {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index cd23a2946db7..343038ca047d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -36,7 +36,7 @@
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
-
+#include <linux/qed/qede_roce.h>
#include "qede.h"
static char version[] =
@@ -100,7 +100,8 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
static void qede_link_update(void *dev, struct qed_link_output *link);
#ifdef CONFIG_QED_SRIOV
-static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -109,6 +110,9 @@ static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
return -EINVAL;
}
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
vlan, vf);
@@ -189,8 +193,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
struct ethtool_drvinfo drvinfo;
struct qede_dev *edev;
- /* Currently only support name change */
- if (event != NETDEV_CHANGENAME)
+ if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
goto done;
/* Check whether this is a qede device */
@@ -203,11 +206,18 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
goto done;
edev = netdev_priv(ndev);
- /* Notify qed of the name change */
- if (!edev->ops || !edev->ops->common)
- goto done;
- edev->ops->common->set_id(edev->cdev, edev->ndev->name,
- "qede");
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ /* Notify qed of the name change */
+ if (!edev->ops || !edev->ops->common)
+ goto done;
+ edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
+ break;
+ case NETDEV_CHANGEADDR:
+ edev = netdev_priv(ndev);
+ qede_roce_event_changeaddr(edev);
+ break;
+ }
done:
return NOTIFY_DONE;
@@ -2541,10 +2551,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
qede_init_ndev(edev);
+ rc = qede_roce_dev_add(edev);
+ if (rc)
+ goto err3;
+
rc = register_netdev(edev->ndev);
if (rc) {
DP_NOTICE(edev, "Cannot register net-device\n");
- goto err3;
+ goto err4;
}
edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
@@ -2564,6 +2578,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
+err4:
+ qede_roce_dev_remove(edev);
err3:
free_netdev(edev->ndev);
err2:
@@ -2610,8 +2626,11 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
cancel_delayed_work_sync(&edev->sp_task);
+
unregister_netdev(ndev);
+ qede_roce_dev_remove(edev);
+
edev->ops->common->set_power_state(cdev, PCI_D0);
pci_set_drvdata(pdev, NULL);
@@ -3508,6 +3527,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
DP_INFO(edev, "Starting qede unload\n");
+ qede_roce_dev_event_close(edev);
mutex_lock(&edev->qede_lock);
edev->state = QEDE_STATE_CLOSED;
@@ -3608,6 +3628,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
/* Query whether link is already-up */
memset(&link_output, 0, sizeof(link_output));
edev->ops->common->get_link(edev->cdev, &link_output);
+ qede_roce_dev_event_open(edev);
qede_link_update(edev, &link_output);
DP_INFO(edev, "Ending successfully qede load\n");
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
new file mode 100644
index 000000000000..9867f960b063
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -0,0 +1,314 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/qed/qede_roce.h>
+#include "qede.h"
+
+static struct qedr_driver *qedr_drv;
+static LIST_HEAD(qedr_dev_list);
+static DEFINE_MUTEX(qedr_dev_list_lock);
+
+bool qede_roce_supported(struct qede_dev *dev)
+{
+ return dev->dev_info.common.rdma_supported;
+}
+
+static void _qede_roce_dev_add(struct qede_dev *edev)
+{
+ if (!qedr_drv)
+ return;
+
+ edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
+ edev->ndev);
+}
+
+static int qede_roce_create_wq(struct qede_dev *edev)
+{
+ INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
+ edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
+ if (!edev->rdma_info.roce_wq) {
+ DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void qede_roce_cleanup_event(struct qede_dev *edev)
+{
+ struct list_head *head = &edev->rdma_info.roce_event_list;
+ struct qede_roce_event_work *event_node;
+
+ flush_workqueue(edev->rdma_info.roce_wq);
+ while (!list_empty(head)) {
+ event_node = list_entry(head->next, struct qede_roce_event_work,
+ list);
+ cancel_work_sync(&event_node->work);
+ list_del(&event_node->list);
+ kfree(event_node);
+ }
+}
+
+static void qede_roce_destroy_wq(struct qede_dev *edev)
+{
+ qede_roce_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.roce_wq);
+}
+
+int qede_roce_dev_add(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ if (qede_roce_supported(edev)) {
+ rc = qede_roce_create_wq(edev);
+ if (rc)
+ return rc;
+
+ INIT_LIST_HEAD(&edev->rdma_info.entry);
+ mutex_lock(&qedr_dev_list_lock);
+ list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+ _qede_roce_dev_add(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+ }
+
+ return rc;
+}
+
+static void _qede_roce_dev_remove(struct qede_dev *edev)
+{
+ if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
+ qedr_drv->remove(edev->rdma_info.qedr_dev);
+ edev->rdma_info.qedr_dev = NULL;
+}
+
+void qede_roce_dev_remove(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ qede_roce_destroy_wq(edev);
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_remove(edev);
+ list_del(&edev->rdma_info.entry);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_open(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
+}
+
+static void qede_roce_dev_open(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_open(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_roce_dev_close(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
+}
+
+static void qede_roce_dev_close(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_roce_dev_close(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void qede_roce_dev_shutdown(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+int qede_roce_register_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+ u8 qedr_counter = 0;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv) {
+ mutex_unlock(&qedr_dev_list_lock);
+ return -EINVAL;
+ }
+ qedr_drv = drv;
+
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ struct net_device *ndev;
+
+ qedr_counter++;
+ _qede_roce_dev_add(edev);
+ ndev = edev->ndev;
+ if (netif_running(ndev) && netif_oper_up(ndev))
+ _qede_roce_dev_open(edev);
+ }
+ mutex_unlock(&qedr_dev_list_lock);
+
+ DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n",
+ qedr_counter);
+
+ return 0;
+}
+EXPORT_SYMBOL(qede_roce_register_driver);
+
+void qede_roce_unregister_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+
+ mutex_lock(&qedr_dev_list_lock);
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ if (edev->rdma_info.qedr_dev)
+ _qede_roce_dev_remove(edev);
+ }
+ qedr_drv = NULL;
+ mutex_unlock(&qedr_dev_list_lock);
+}
+EXPORT_SYMBOL(qede_roce_unregister_driver);
+
+static void qede_roce_changeaddr(struct qede_dev *edev)
+{
+ if (!qede_roce_supported(edev))
+ return;
+
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
+}
+
+static struct qede_roce_event_work *
+qede_roce_get_free_event_node(struct qede_dev *edev)
+{
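+ /* Reuse an idle event node if one exists; allocate a new one otherwise */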
+ struct qede_roce_event_work *event_node = NULL;
+ struct list_head *list_node = NULL;
+ bool found = false;
+
+ list_for_each(list_node, &edev->rdma_info.roce_event_list) {
+ event_node = list_entry(list_node, struct qede_roce_event_work,
+ list);
+ if (!work_pending(&event_node->work)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
+ if (!event_node) {
+ DP_NOTICE(edev,
+ "qedr: Could not allocate memory for roce work\n");
+ return NULL;
+ }
+ list_add_tail(&event_node->list,
+ &edev->rdma_info.roce_event_list);
+ }
+
+ return event_node;
+}
+
+static void qede_roce_handle_event(struct work_struct *work)
+{
+ struct qede_roce_event_work *event_node;
+ enum qede_roce_event event;
+ struct qede_dev *edev;
+
+ event_node = container_of(work, struct qede_roce_event_work, work);
+ event = event_node->event;
+ edev = event_node->ptr;
+
+ switch (event) {
+ case QEDE_UP:
+ qede_roce_dev_open(edev);
+ break;
+ case QEDE_DOWN:
+ qede_roce_dev_close(edev);
+ break;
+ case QEDE_CLOSE:
+ qede_roce_dev_shutdown(edev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qede_roce_changeaddr(edev);
+ break;
+ default:
+ DP_NOTICE(edev, "Invalid roce event %d", event);
+ }
+}
+
+static void qede_roce_add_event(struct qede_dev *edev,
+ enum qede_roce_event event)
+{
+ struct qede_roce_event_work *event_node;
+
+ if (!edev->rdma_info.qedr_dev)
+ return;
+
+ event_node = qede_roce_get_free_event_node(edev);
+ if (!event_node)
+ return;
+
+ event_node->event = event;
+ event_node->ptr = edev;
+
+ INIT_WORK(&event_node->work, qede_roce_handle_event);
+ queue_work(edev->rdma_info.roce_wq, &event_node->work);
+}
+
+void qede_roce_dev_event_open(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_UP);
+}
+
+void qede_roce_dev_event_close(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_DOWN);
+}
+
+void qede_roce_event_changeaddr(struct qede_dev *edev)
+{
+ qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 24061b9b92e8..5f327659efa7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -238,7 +238,7 @@ int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
int qlcnic_sriov_get_vf_config(struct net_device *, int ,
struct ifla_vf_info *);
-int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
#else
static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index afd687e5e779..50eaafa3eaba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1915,7 +1915,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
}
int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
- u16 vlan, u8 qos)
+ u16 vlan, u8 qos, __be16 vlan_proto)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1928,6 +1928,9 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
if (vf >= sriov->num_vfs || qos > 7)
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
if (vlan > MAX_VLAN_ID) {
netdev_err(netdev,
"Invalid VLAN ID, allowed range is [0 - %d]\n",
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index c412ba9a27e7..da4e90db4d98 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -19,6 +19,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
+#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-phy.h"
@@ -167,7 +168,6 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *phy_np;
struct mii_bus *mii_bus;
int ret;
@@ -183,14 +183,37 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
mii_bus->parent = &pdev->dev;
mii_bus->priv = adpt;
- ret = of_mdiobus_register(mii_bus, np);
- if (ret) {
- dev_err(&pdev->dev, "could not register mdio bus\n");
- return ret;
+ if (has_acpi_companion(&pdev->dev)) {
+ u32 phy_addr;
+
+ ret = mdiobus_register(mii_bus);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register mdio bus\n");
+ return ret;
+ }
+ ret = device_property_read_u32(&pdev->dev, "phy-channel",
+ &phy_addr);
+ if (ret)
+ /* If we can't read a valid phy address, then assume
+ * that there is only one phy on this mdio bus.
+ */
+ adpt->phydev = phy_find_first(mii_bus);
+ else
+ adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
+
+ } else {
+ struct device_node *phy_np;
+
+ ret = of_mdiobus_register(mii_bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register mdio bus\n");
+ return ret;
+ }
+
+ phy_np = of_parse_phandle(np, "phy-handle", 0);
+ adpt->phydev = of_phy_find_device(phy_np);
}
- phy_np = of_parse_phandle(np, "phy-handle", 0);
- adpt->phydev = of_phy_find_device(phy_np);
if (!adpt->phydev) {
dev_err(&pdev->dev, "could not find external phy\n");
mdiobus_unregister(mii_bus);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 6ab0a3c96431..75c1b530e39e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -14,6 +14,7 @@
*/
#include <linux/iopoll.h>
+#include <linux/acpi.h>
#include <linux/of_device.h>
#include "emac.h"
#include "emac-mac.h"
@@ -662,6 +663,24 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
}
+static int emac_sgmii_acpi_match(struct device *dev, void *data)
+{
+ static const struct acpi_device_id match_table[] = {
+ {
+ .id = "QCOM8071",
+ .driver_data = (kernel_ulong_t)emac_sgmii_init_v2,
+ },
+ {}
+ };
+ const struct acpi_device_id *id = acpi_match_device(match_table, dev);
+ emac_sgmii_initialize *initialize = data;
+
+ if (id)
+ *initialize = (emac_sgmii_initialize)id->driver_data;
+
+ return !!id;
+}
+
static const struct of_device_id emac_sgmii_dt_match[] = {
{
.compatible = "qcom,fsm9900-emac-sgmii",
@@ -679,43 +698,87 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
struct platform_device *sgmii_pdev = NULL;
struct emac_phy *phy = &adpt->phy;
struct resource *res;
- const struct of_device_id *match;
- struct device_node *np;
+ int ret;
- np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0);
- if (!np) {
- dev_err(&pdev->dev, "missing internal-phy property\n");
- return -ENODEV;
- }
+ if (has_acpi_companion(&pdev->dev)) {
+ struct device *dev;
- sgmii_pdev = of_find_device_by_node(np);
- if (!sgmii_pdev) {
- dev_err(&pdev->dev, "invalid internal-phy property\n");
- return -ENODEV;
- }
+ dev = device_find_child(&pdev->dev, &phy->initialize,
+ emac_sgmii_acpi_match);
- match = of_match_device(emac_sgmii_dt_match, &sgmii_pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "unrecognized internal phy node\n");
- return -ENODEV;
- }
+ if (!dev) {
+ dev_err(&pdev->dev, "cannot find internal phy node\n");
+ return -ENODEV;
+ }
- phy->initialize = (emac_sgmii_initialize)match->data;
+ sgmii_pdev = to_platform_device(dev);
+ } else {
+ const struct of_device_id *match;
+ struct device_node *np;
+
+ np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "missing internal-phy property\n");
+ return -ENODEV;
+ }
+
+ sgmii_pdev = of_find_device_by_node(np);
+ if (!sgmii_pdev) {
+ dev_err(&pdev->dev, "invalid internal-phy property\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(emac_sgmii_dt_match, &sgmii_pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "unrecognized internal phy node\n");
+ ret = -ENODEV;
+ goto error_put_device;
+ }
+
+ phy->initialize = (emac_sgmii_initialize)match->data;
+ }
/* Base address is the first address */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
- phy->base = devm_ioremap_resource(&sgmii_pdev->dev, res);
- if (IS_ERR(phy->base))
- return PTR_ERR(phy->base);
+ if (!res) {
+ ret = -EINVAL;
+ goto error_put_device;
+ }
+
+ phy->base = ioremap(res->start, resource_size(res));
+ if (!phy->base) {
+ ret = -ENOMEM;
+ goto error_put_device;
+ }
/* v2 SGMII has a per-lane digital block, so parse it if it exists */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1);
if (res) {
- phy->digital = devm_ioremap_resource(&sgmii_pdev->dev, res);
- if (IS_ERR(phy->base))
- return PTR_ERR(phy->base);
-
+ phy->digital = ioremap(res->start, resource_size(res));
+ if (!phy->digital) {
+ ret = -ENOMEM;
+ goto error_unmap_base;
+ }
}
- return phy->initialize(adpt);
+ ret = phy->initialize(adpt);
+ if (ret)
+ goto error;
+
+ /* We've remapped the addresses, so we don't need the device any
+ * more. of_find_device_by_node() says we should release it.
+ */
+ put_device(&sgmii_pdev->dev);
+
+ return 0;
+
+error:
+ if (phy->digital)
+ iounmap(phy->digital);
+error_unmap_base:
+ iounmap(phy->base);
+error_put_device:
+ put_device(&sgmii_pdev->dev);
+
+ return ret;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index e47d38701d6c..9bf3b2b82e95 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -22,6 +22,7 @@
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-phy.h"
@@ -531,18 +532,16 @@ static void emac_clks_teardown(struct emac_adapter *adpt)
static int emac_probe_resources(struct platform_device *pdev,
struct emac_adapter *adpt)
{
- struct device_node *node = pdev->dev.of_node;
struct net_device *netdev = adpt->netdev;
struct resource *res;
- const void *maddr;
+ char maddr[ETH_ALEN];
int ret = 0;
/* get mac address */
- maddr = of_get_mac_address(node);
- if (!maddr)
- eth_hw_addr_random(netdev);
- else
+ if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
ether_addr_copy(netdev->dev_addr, maddr);
+ else
+ eth_hw_addr_random(netdev);
/* Core 0 interrupt */
ret = platform_get_irq(pdev, 0);
@@ -577,6 +576,16 @@ static const struct of_device_id emac_dt_match[] = {
{}
};
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id emac_acpi_match[] = {
+ {
+ .id = "QCOM8070",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, emac_acpi_match);
+#endif
+
static int emac_probe(struct platform_device *pdev)
{
struct net_device *netdev;
@@ -723,6 +732,10 @@ static int emac_remove(struct platform_device *pdev)
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);
+ if (adpt->phy.digital)
+ iounmap(adpt->phy.digital);
+ iounmap(adpt->phy.base);
+
return 0;
}
@@ -732,6 +745,7 @@ static struct platform_driver emac_platform_driver = {
.driver = {
.name = "qcom-emac",
.of_match_table = emac_dt_match,
+ .acpi_match_table = ACPI_PTR(emac_acpi_match),
},
};
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 4f132cf177cd..85ec447c2d18 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -27,7 +27,7 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supports the following CPUs:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A777x and R8A779x.
+ R8A7740, R8A774x, R8A777x and R8A779x.
config RAVB
tristate "Renesas Ethernet AVB support"
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 440ae272161c..05b0dc55de77 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2959,6 +2959,8 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
+ { .compatible = "renesas,ether-r8a7743", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7745", .data = &r8a779x_data },
{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 1ab995f7146b..2eb9b49569d5 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/notifier.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
@@ -52,6 +53,9 @@ struct rocker_port {
struct rocker_dma_ring_info rx_ring;
};
+struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker);
+
struct rocker_world_ops;
struct rocker {
@@ -66,6 +70,7 @@ struct rocker {
spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
+ struct notifier_block fib_nb;
struct rocker_world_ops *wops;
void *wpriv;
};
@@ -117,11 +122,6 @@ struct rocker_world_ops {
int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port,
struct switchdev_obj_port_vlan *vlan,
switchdev_obj_dump_cb_t *cb);
- int (*port_obj_fib4_add)(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans);
- int (*port_obj_fib4_del)(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4);
int (*port_obj_fdb_add)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
@@ -141,6 +141,11 @@ struct rocker_world_ops {
int (*port_ev_mac_vlan_seen)(struct rocker_port *rocker_port,
const unsigned char *addr,
__be16 vlan_id);
+ int (*fib4_add)(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info);
+ int (*fib4_del)(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info);
+ void (*fib4_abort)(struct rocker *rocker);
};
extern struct rocker_world_ops rocker_ofdpa_ops;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 1f0c08602eba..5424fb341613 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1625,29 +1625,6 @@ rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
}
static int
-rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_fib4_add)
- return -EOPNOTSUPP;
- return wops->port_obj_fib4_add(rocker_port, fib4, trans);
-}
-
-static int
-rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4)
-{
- struct rocker_world_ops *wops = rocker_port->rocker->wops;
-
- if (!wops->port_obj_fib4_del)
- return -EOPNOTSUPP;
- return wops->port_obj_fib4_del(rocker_port, fib4);
-}
-
-static int
rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
@@ -1733,6 +1710,34 @@ static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}
+static int rocker_world_fib4_add(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (!wops->fib4_add)
+ return 0;
+ return wops->fib4_add(rocker, fen_info);
+}
+
+static int rocker_world_fib4_del(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (!wops->fib4_del)
+ return 0;
+ return wops->fib4_del(rocker, fen_info);
+}
+
+static void rocker_world_fib4_abort(struct rocker *rocker)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (wops->fib4_abort)
+ wops->fib4_abort(rocker);
+}
+
/*****************
* Net device ops
*****************/
@@ -2096,11 +2101,6 @@ static int rocker_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = rocker_world_port_obj_fib4_add(rocker_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj),
- trans);
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = rocker_world_port_obj_fdb_add(rocker_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -2125,10 +2125,6 @@ static int rocker_port_obj_del(struct net_device *dev,
err = rocker_world_port_obj_vlan_del(rocker_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- err = rocker_world_port_obj_fib4_del(rocker_port,
- SWITCHDEV_OBJ_IPV4_FIB(obj));
- break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = rocker_world_port_obj_fdb_del(rocker_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -2175,6 +2171,31 @@ static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_obj_dump = rocker_port_obj_dump,
};
+static int rocker_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
+ struct fib_entry_notifier_info *fen_info = ptr;
+ int err;
+
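+ /* If offloading a route fails, abort all FIB offload and let
+ * the kernel handle forwarding in software.
+ */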
+ switch (event) {
+ case FIB_EVENT_ENTRY_ADD:
+ err = rocker_world_fib4_add(rocker, fen_info);
+ if (err)
+ rocker_world_fib4_abort(rocker);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ rocker_world_fib4_del(rocker, fen_info);
+ break;
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ rocker_world_fib4_abort(rocker);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
/********************
* ethtool interface
********************/
@@ -2740,6 +2761,9 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_probe_ports;
}
+ rocker->fib_nb.notifier_call = rocker_router_fib_event;
+ register_fib_notifier(&rocker->fib_nb);
+
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
(int)sizeof(rocker->hw.id), &rocker->hw.id);
@@ -2771,6 +2795,7 @@ static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ unregister_fib_notifier(&rocker->fib_nb);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
rocker_remove_ports(rocker);
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
@@ -2799,6 +2824,37 @@ static bool rocker_port_dev_check(const struct net_device *dev)
return dev->netdev_ops == &rocker_port_netdev_ops;
}
+static bool rocker_port_dev_check_under(const struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+
+ if (!rocker_port_dev_check(dev))
+ return false;
+
+ rocker_port = netdev_priv(dev);
+ if (rocker_port->rocker != rocker)
+ return false;
+
+ return true;
+}
+
+struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (rocker_port_dev_check_under(dev, rocker))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
+ if (rocker_port_dev_check_under(lower_dev, rocker))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
+
static int rocker_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index fcad907baecf..431a60804272 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -99,6 +99,7 @@ struct ofdpa_flow_tbl_entry {
struct ofdpa_flow_tbl_key key;
size_t key_len;
u32 key_crc32; /* key */
+ struct fib_info *fi;
};
struct ofdpa_group_tbl_entry {
@@ -189,6 +190,7 @@ struct ofdpa {
spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
u32 neigh_tbl_next_index;
unsigned long ageing_time;
+ bool fib_aborted;
};
struct ofdpa_port {
@@ -1043,7 +1045,8 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
- u32 group_id, int flags)
+ u32 group_id, struct fib_info *fi,
+ int flags)
{
struct ofdpa_flow_tbl_entry *entry;
@@ -1060,6 +1063,7 @@ static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
entry->key.ucast_routing.group_id = group_id;
entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
ucast_routing.group_id);
+ entry->fi = fi;
return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
@@ -1425,7 +1429,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
eth_type, ip_addr,
inet_make_mask(32),
priority, goto_tbl,
- group_id, flags);
+ group_id, NULL, flags);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
@@ -2390,7 +2394,7 @@ found:
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
struct switchdev_trans *trans, __be32 dst,
- int dst_len, const struct fib_info *fi,
+ int dst_len, struct fib_info *fi,
u32 tb_id, int flags)
{
const struct fib_nh *nh;
@@ -2426,7 +2430,7 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
dst_mask, priority, goto_tbl,
- group_id, flags);
+ group_id, fi, flags);
if (err)
netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
err, &dst);
@@ -2718,28 +2722,6 @@ static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
return err;
}
-static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
-{
- struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-
- return ofdpa_port_fib_ipv4(ofdpa_port, trans,
- htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id, 0);
-}
-
-static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_ipv4_fib *fib4)
-{
- struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
-
- return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
- htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id,
- OFDPA_OP_FLAG_REMOVE);
-}
-
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
@@ -2922,6 +2904,82 @@ static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
}
+static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
+ struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+
+ rocker_port = rocker_port_dev_lower_find(dev, rocker);
+ return rocker_port ? rocker_port->wpriv : NULL;
+}
+
+static int ofdpa_fib4_add(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+ int err;
+
+ if (ofdpa->fib_aborted)
+ return 0;
+ ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ if (!ofdpa_port)
+ return 0;
+ err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ fen_info->dst_len, fen_info->fi,
+ fen_info->tb_id, 0);
+ if (err)
+ return err;
+ fib_info_offload_inc(fen_info->fi);
+ return 0;
+}
+
+static int ofdpa_fib4_del(struct rocker *rocker,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+
+ if (ofdpa->fib_aborted)
+ return 0;
+ ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
+ if (!ofdpa_port)
+ return 0;
+ fib_info_offload_dec(fen_info->fi);
+ return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
+ fen_info->dst_len, fen_info->fi,
+ fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
+}
+
+static void ofdpa_fib4_abort(struct rocker *rocker)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+ struct ofdpa_port *ofdpa_port;
+ struct ofdpa_flow_tbl_entry *flow_entry;
+ struct hlist_node *tmp;
+ unsigned long flags;
+ int bkt;
+
+ if (ofdpa->fib_aborted)
+ return;
+
+ spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
+ hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
+ if (flow_entry->key.tbl_id !=
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
+ continue;
+ ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
+ rocker);
+ if (!ofdpa_port)
+ continue;
+ fib_info_offload_dec(flow_entry->fi);
+ ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
+ flow_entry);
+ }
+ spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
+ ofdpa->fib_aborted = true;
+}
+
struct rocker_world_ops rocker_ofdpa_ops = {
.kind = "ofdpa",
.priv_size = sizeof(struct ofdpa),
@@ -2941,8 +2999,6 @@ struct rocker_world_ops rocker_ofdpa_ops = {
.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
.port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
- .port_obj_fib4_add = ofdpa_port_obj_fib4_add,
- .port_obj_fib4_del = ofdpa_port_obj_fib4_del,
.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
.port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
@@ -2951,4 +3007,7 @@ struct rocker_world_ops rocker_ofdpa_ops = {
.port_neigh_update = ofdpa_port_neigh_update,
.port_neigh_destroy = ofdpa_port_neigh_destroy,
.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
+ .fib4_add = ofdpa_fib4_add,
+ .fib4_del = ofdpa_fib4_del,
+ .fib4_abort = ofdpa_fib4_abort,
};
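The ofdpa fib4 hooks above keep the fib_info offload counter consistent with hardware state: fib_info_offload_inc() runs only after a successful hardware write, fib_info_offload_dec() runs on removal, and the abort path decrements while flushing every unicast-routing flow entry. A small userspace model of that invariant, with stand-in names:

#include <assert.h>

static int offload_cnt;

static int hw_write(int fail) { return fail ? -1 : 0; }

static int route_add(int fail)
{
	if (hw_write(fail))
		return -1;	/* no increment on failure */
	offload_cnt++;		/* counts entries actually in hardware */
	return 0;
}

static void route_del(void)
{
	offload_cnt--;		/* pairs with the earlier increment */
	/* hardware removal would follow here */
}

int main(void)
{
	assert(route_add(1) < 0 && offload_cnt == 0);
	assert(route_add(0) == 0 && offload_cnt == 1);
	route_del();
	assert(offload_cnt == 0);
	return 0;
}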
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 9fbc12a8f80c..241520943ada 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1156,7 +1156,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
* acquired locks in the wrong order.
*/
list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
- async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ if (async->complete)
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
list_del(&async->list);
kfree(async);
}
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index dd204d9704c6..77a5364f7a10 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1269,13 +1269,13 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
if (IS_ERR(ptp->phc_clock)) {
rc = PTR_ERR(ptp->phc_clock);
goto fail3;
- }
-
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ } else if (ptp->phc_clock) {
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
}
ptp->nic_ts_enabled = false;
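This hunk, like the stmmac_ptp.c hunk further below, handles the same contract: ptp_clock_register() may return an error pointer on failure or NULL when PHC support is not built in, and only a real handle justifies setting up the PPS worker. A sketch of the three-way check, with IS_ERR()/ERR_PTR() modelled locally rather than taken from kernel headers:

#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long e) { return (void *)e; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *phc_register(int mode)
{
	if (mode < 0)
		return ERR_PTR(-22);	/* hard failure */
	if (mode == 0)
		return NULL;		/* PHC absent: not an error */
	return (void *)0x1;		/* usable clock handle */
}

int main(void)
{
	void *clk = phc_register(0);

	if (IS_ERR(clk))
		return 1;		/* bail out with the errno */
	else if (clk)
		puts("start PPS worker");	/* only with a real clock */
	return 0;			/* NULL: carry on without PPS */
}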
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 816c44689e67..9abcf4aded30 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -22,7 +22,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
}
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
- u8 qos)
+ u8 qos, __be16 vlan_proto)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -31,6 +31,9 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
(qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
} else {
return -EOPNOTSUPP;
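The extra __be16 argument mirrors the ndo_set_vf_vlan() signature change that added an 802.1ad-capable protocol field across the tree; a driver whose hardware only tags 802.1Q rejects anything else up front. A minimal stand-in, with the ethertypes written out and -93 standing in for -EPROTONOSUPPORT:

#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_8021Q	0x8100	/* customer VLAN tag */
#define ETH_P_8021AD	0x88A8	/* 802.1ad service tag */

static int set_vf_vlan(int vf, uint16_t vlan, uint8_t qos, uint16_t proto_be)
{
	if (proto_be != htons(ETH_P_8021Q))
		return -93;	/* only 802.1Q offload supported here */
	(void)vf; (void)vlan; (void)qos;
	return 0;		/* hardware programming would follow */
}

int main(void)
{
	return set_vf_vlan(0, 10, 0, htons(ETH_P_8021AD)) == -93 ? 0 : 1;
}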
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
index 400df526586d..ba1762e7f216 100644
--- a/drivers/net/ethernet/sfc/sriov.h
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -16,7 +16,7 @@
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
- u8 qos);
+ u8 qos, __be16 vlan_proto);
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk);
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 503a3b6dce91..73212590d04a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2323,6 +2323,9 @@ static int smc_drv_probe(struct platform_device *pdev)
} else {
lp->cfg.flags |= SMC91X_USE_16BIT;
}
+ if (!device_property_read_u32(&pdev->dev, "reg-shift",
+ &val))
+ lp->io_shift = val;
}
#endif
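device_property_read_u32() returns 0 only when the property is present and parses, so the smc91x hunk overrides io_shift solely for nodes that carry "reg-shift" and keeps the built-in default otherwise. The optional-property-with-default pattern, modelled in plain C:

#include <stdio.h>
#include <string.h>

/* stand-in for device_property_read_u32(): 0 on success, an -ENOENT-like
 * negative value when the key is absent */
static int prop_read_u32(const char *name, unsigned int *val)
{
	if (strcmp(name, "reg-shift") != 0)
		return -2;
	*val = 2;
	return 0;
}

int main(void)
{
	unsigned int io_shift = 0;	/* default */
	unsigned int v;

	if (!prop_read_u32("reg-shift", &v))
		io_shift = v;		/* override only on success */
	printf("io_shift=%u\n", io_shift);
	return 0;
}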
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 6f6bbc54e9fe..7df4ff158f3d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -261,7 +261,7 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index df5580dcdfed..51019b794be5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -102,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 170a18b61281..6e3b82972ce8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -187,7 +187,7 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
if (IS_ERR(priv->ptp_clock)) {
priv->ptp_clock = NULL;
pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
- } else
+ } else if (priv->ptp_clock)
pr_debug("Added PTP HW clock successfully on %s\n",
priv->dev->name);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 5a1e98547031..470b3dcd54e5 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -127,7 +127,7 @@ struct sixpack {
#define AX25_6PACK_HEADER_LEN 0
-static void sixpack_decode(struct sixpack *, unsigned char[], int);
+static void sixpack_decode(struct sixpack *, const unsigned char[], int);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
/*
@@ -428,7 +428,7 @@ out:
/*
* Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
+ * This function is called by the tty module in the kernel when
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
@@ -436,7 +436,6 @@ static void sixpack_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct sixpack *sp;
- unsigned char buf[512];
int count1;
if (!count)
@@ -446,10 +445,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
if (!sp)
return;
- memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
-
/* Read the characters out of the buffer */
-
count1 = count;
while (count) {
count--;
@@ -459,7 +455,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
continue;
}
}
- sixpack_decode(sp, buf, count1);
+ sixpack_decode(sp, cp, count1);
sp_put(sp);
tty_unthrottle(tty);
@@ -992,7 +988,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
/* decode a 6pack packet */
static void
-sixpack_decode(struct sixpack *sp, unsigned char *pre_rbuff, int count)
+sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
{
unsigned char inbyte;
int count1;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 284b97b6b258..f4fbcb5aa24a 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -433,7 +433,7 @@ struct nvsp_1_message_revoke_send_buffer {
*/
struct nvsp_1_message_send_rndis_packet {
/*
- * This field is specified by RNIDS. They assume there's two different
+ * This field is specified by RNDIS. They assume there are two different
* channels of communication. However, the Network VSP only has one.
* Therefore, the channel travels with the RNDIS packet.
*/
@@ -578,7 +578,7 @@ struct nvsp_5_send_indirect_table {
/* The number of entries in the send indirection table */
u32 count;
- /* The offset of the send indireciton table from top of this struct.
+ /* The offset of the send indirection table from top of this struct.
* The send indirection table tells which channel to put the send
* traffic on. Each entry is a channel number.
*/
@@ -649,6 +649,8 @@ struct multi_recv_comp {
struct netvsc_stats {
u64 packets;
u64 bytes;
+ u64 broadcast;
+ u64 multicast;
struct u64_stats_sync syncp;
};
@@ -695,9 +697,8 @@ struct net_device_context {
bool start_remove;
/* State to manage the associated VF interface. */
- struct net_device *vf_netdev;
- bool vf_inject;
- atomic_t vf_use_cnt;
+ struct net_device __rcu *vf_netdev;
+
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
@@ -733,7 +734,6 @@ struct netvsc_device {
struct nvsp_message channel_init_pkt;
struct nvsp_message revoke_packet;
- /* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
@@ -1238,7 +1238,7 @@ struct rndis_message {
u32 ndis_msg_type;
/* Total length of this message, from the beginning */
- /* of the sruct rndis_message, in bytes. */
+ /* of the struct rndis_message, in bytes. */
u32 msg_len;
/* Actual message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index ff05b9b0837f..720b5fa9e625 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -635,7 +635,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
q_idx = nvsc_packet->q_idx;
channel = incoming_channel;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
num_outstanding_sends =
@@ -944,7 +944,7 @@ int netvsc_send(struct hv_device *device,
}
if (msdp->skb)
- dev_kfree_skb_any(msdp->skb);
+ dev_consume_skb_any(msdp->skb);
if (xmit_more && !packet->cp_partial) {
msdp->skb = skb;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2360e704e271..52eeb2f67276 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -667,51 +667,23 @@ int netvsc_recv_callback(struct hv_device *device_obj,
{
struct net_device *net = hv_get_drvdata(device_obj);
struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct net_device *vf_netdev;
struct sk_buff *skb;
- struct sk_buff *vf_skb;
struct netvsc_stats *rx_stats;
- u32 bytes_recvd = packet->total_data_buflen;
- int ret = 0;
- if (!net || net->reg_state != NETREG_REGISTERED)
+ if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
- if (READ_ONCE(net_device_ctx->vf_inject)) {
- atomic_inc(&net_device_ctx->vf_use_cnt);
- if (!READ_ONCE(net_device_ctx->vf_inject)) {
- /*
- * We raced; just move on.
- */
- atomic_dec(&net_device_ctx->vf_use_cnt);
- goto vf_injection_done;
- }
-
- /*
- * Inject this packet into the VF inerface.
- * On Hyper-V, multicast and brodcast packets
- * are only delivered on the synthetic interface
- * (after subjecting these to policy filters on
- * the host). Deliver these via the VF interface
- * in the guest.
- */
- vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
- packet, csum_info, *data,
- vlan_tci);
- if (vf_skb != NULL) {
- ++net_device_ctx->vf_netdev->stats.rx_packets;
- net_device_ctx->vf_netdev->stats.rx_bytes +=
- bytes_recvd;
- netif_receive_skb(vf_skb);
- } else {
- ++net->stats.rx_dropped;
- ret = NVSP_STAT_FAIL;
- }
- atomic_dec(&net_device_ctx->vf_use_cnt);
- return ret;
- }
-
-vf_injection_done:
- rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+ /*
+ * If necessary, inject this packet into the VF interface.
+ * On Hyper-V, multicast and broadcast packets are only delivered
+ * to the synthetic interface (after subjecting these to
+ * policy filters on the host). Deliver these via the VF
+ * interface in the guest.
+ */
+ vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
+ if (vf_netdev && (vf_netdev->flags & IFF_UP))
+ net = vf_netdev;
/* Allocate a skb - TODO direct I/O to pages? */
skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
@@ -719,12 +691,25 @@ vf_injection_done:
++net->stats.rx_dropped;
return NVSP_STAT_FAIL;
}
- skb_record_rx_queue(skb, channel->
- offermsg.offer.sub_channel_index);
+ if (net != vf_netdev)
+ skb_record_rx_queue(skb,
+ channel->offermsg.offer.sub_channel_index);
+
+ /*
+ * Even if injecting the packet, record the statistics
+ * on the synthetic device because modifying the VF device
+ * statistics will not work correctly.
+ */
+ rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += packet->total_data_buflen;
+
+ if (skb->pkt_type == PACKET_BROADCAST)
+ ++rx_stats->broadcast;
+ else if (skb->pkt_type == PACKET_MULTICAST)
+ ++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
/*
@@ -967,7 +952,7 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
cpu);
struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
cpu);
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
unsigned int start;
do {
@@ -980,12 +965,14 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
rx_packets = rx_stats->packets;
rx_bytes = rx_stats->bytes;
+ rx_multicast = rx_stats->multicast + rx_stats->broadcast;
} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
t->tx_bytes += tx_bytes;
t->tx_packets += tx_packets;
t->rx_bytes += rx_bytes;
t->rx_packets += rx_packets;
+ t->multicast += rx_multicast;
}
t->tx_dropped = net->stats.tx_dropped;
@@ -1215,22 +1202,44 @@ static void netvsc_free_netdev(struct net_device *netdev)
free_netdev(netdev);
}
-static struct net_device *get_netvsc_net_device(char *mac)
+static struct net_device *get_netvsc_bymac(const u8 *mac)
{
- struct net_device *dev, *found = NULL;
+ struct net_device *dev;
ASSERT_RTNL();
for_each_netdev(&init_net, dev) {
- if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
- if (dev->netdev_ops != &device_ops)
- continue;
- found = dev;
- break;
- }
+ if (dev->netdev_ops != &device_ops)
+ continue; /* not a netvsc device */
+
+ if (ether_addr_equal(mac, dev->perm_addr))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+
+ for_each_netdev(&init_net, dev) {
+ struct net_device_context *net_device_ctx;
+
+ if (dev->netdev_ops != &device_ops)
+ continue; /* not a netvsc device */
+
+ net_device_ctx = netdev_priv(dev);
+ if (net_device_ctx->nvdev == NULL)
+ continue; /* device is removed */
+
+ if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
+ return dev; /* a match */
}
- return found;
+ return NULL;
}
static int netvsc_register_vf(struct net_device *vf_netdev)
@@ -1238,9 +1247,8 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
struct net_device *ndev;
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
- if (eth_ops == NULL || eth_ops == &ethtool_ops)
+ if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
/*
@@ -1248,13 +1256,13 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* associate with the VF interface. If we don't find a matching
* synthetic interface, move on.
*/
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_bymac(vf_netdev->perm_addr);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || net_device_ctx->vf_netdev)
+ if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
return NOTIFY_DONE;
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1262,46 +1270,26 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* Take a reference on the module.
*/
try_module_get(THIS_MODULE);
- net_device_ctx->vf_netdev = vf_netdev;
- return NOTIFY_OK;
-}
-static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
-{
- net_device_ctx->vf_inject = true;
-}
-
-static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
-{
- net_device_ctx->vf_inject = false;
-
- /* Wait for currently active users to drain out. */
- while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
- udelay(50);
+ dev_hold(vf_netdev);
+ rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
+ return NOTIFY_OK;
}
static int netvsc_vf_up(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
struct net_device_context *net_device_ctx;
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
-
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
-
netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
- netvsc_inject_enable(net_device_ctx);
/*
* Open the device before switching data path.
@@ -1327,23 +1315,15 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
struct net_device_context *net_device_ctx;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
-
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
-
netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
- netvsc_inject_disable(net_device_ctx);
netvsc_switch_datapath(ndev, false);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
rndis_filter_close(netvsc_dev);
@@ -1359,23 +1339,19 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
struct net_device_context *net_device_ctx;
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
-
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
+
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
- netvsc_inject_disable(net_device_ctx);
- net_device_ctx->vf_netdev = NULL;
+
+ RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
+ dev_put(vf_netdev);
module_put(THIS_MODULE);
return NOTIFY_OK;
}
@@ -1427,10 +1403,6 @@ static int netvsc_probe(struct hv_device *dev,
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
- atomic_set(&net_device_ctx->vf_use_cnt, 0);
- net_device_ctx->vf_netdev = NULL;
- net_device_ctx->vf_inject = false;
-
net->netdev_ops = &device_ops;
net->hw_features = NETVSC_HW_FEATURES;
@@ -1539,13 +1511,21 @@ static int netvsc_netdev_event(struct notifier_block *this,
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ /* Skip our own events */
+ if (event_dev->netdev_ops == &device_ops)
+ return NOTIFY_DONE;
+
+ /* Avoid non-Ethernet type devices */
+ if (event_dev->type != ARPHRD_ETHER)
+ return NOTIFY_DONE;
+
/* Avoid Vlan dev with same MAC registering as VF */
if (event_dev->priv_flags & IFF_802_1Q_VLAN)
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
- if (event_dev->priv_flags & IFF_BONDING &&
- event_dev->flags & IFF_MASTER)
+ if ((event_dev->priv_flags & IFF_BONDING) &&
+ (event_dev->flags & IFF_MASTER))
return NOTIFY_DONE;
switch (event) {
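The netvsc changes above replace the vf_inject flag plus vf_use_cnt busy-wait with one RCU-protected vf_netdev pointer: the receive path samples it with rcu_dereference(), registration publishes it after dev_hold(), and unregistration clears it with RCU_INIT_POINTER() before dev_put(). A userspace analogue using acquire/release atomics; the RCU grace-period machinery and reference counting are elided:

#include <stdatomic.h>
#include <stdio.h>

struct netdev { const char *name; };

static _Atomic(struct netdev *) vf_netdev;

static void vf_register(struct netdev *vf)
{
	/* kernel: dev_hold(vf), then rcu_assign_pointer() */
	atomic_store_explicit(&vf_netdev, vf, memory_order_release);
}

static void vf_unregister(void)
{
	atomic_store_explicit(&vf_netdev, NULL, memory_order_release);
	/* kernel: wait a grace period, then dev_put(vf) */
}

static const char *rx_path(void)
{
	struct netdev *vf =
		atomic_load_explicit(&vf_netdev, memory_order_acquire);

	return vf ? vf->name : "synthetic";	/* inject into VF if present */
}

int main(void)
{
	struct netdev vf = { "vf0" };

	vf_register(&vf);
	puts(rx_path());	/* vf0 */
	vf_unregister();
	puts(rx_path());	/* synthetic */
	return 0;
}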
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 0becf0ac3926..ec387efb61d0 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -30,7 +30,7 @@
static int numlbs = 2;
static LIST_HEAD(fakelb_phys);
-static DEFINE_SPINLOCK(fakelb_phys_lock);
+static DEFINE_MUTEX(fakelb_phys_lock);
static LIST_HEAD(fakelb_ifup_phys);
static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
@@ -188,9 +188,9 @@ static int fakelb_add_one(struct device *dev)
if (err)
goto err_reg;
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_add_tail(&phy->list, &fakelb_phys);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return 0;
@@ -222,10 +222,10 @@ static int fakelb_probe(struct platform_device *pdev)
return 0;
err_slave:
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
fakelb_del(phy);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return err;
}
@@ -233,10 +233,10 @@ static int fakelb_remove(struct platform_device *pdev)
{
struct fakelb_phy *phy, *tmp;
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
fakelb_del(phy);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return 0;
}
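The fakelb lock conversion appears to exist because fakelb_del() reaches sleeping teardown (unregistering an ieee802154 hw device), which must not happen inside a spinlock-protected section; a mutex tolerates sleeping while held. A pthread sketch of the distinction (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t phys_lock = PTHREAD_MUTEX_INITIALIZER;

static void del_one(int id)
{
	usleep(1000);		/* models a teardown call that sleeps */
	printf("deleted %d\n", id);
}

int main(void)
{
	pthread_mutex_lock(&phys_lock);	/* sleeping while held is fine */
	for (int id = 0; id < 2; id++)
		del_one(id);
	pthread_mutex_unlock(&phys_lock);
	return 0;
}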
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 695a5dc9ace3..7e0732f5ea07 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -23,11 +23,13 @@
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/addrconf.h>
+#include <net/l3mdev.h>
#define IPVLAN_DRV "ipvlan"
#define IPV_DRV_VER "0.1"
@@ -124,4 +126,8 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6);
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
void ipvlan_ht_addr_del(struct ipvl_addr *addr);
+struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
+ u16 proto);
+unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b5f9511d819e..b4e990743e1d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -560,6 +560,7 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
case IPVLAN_MODE_L2:
return ipvlan_xmit_mode_l2(skb, dev);
case IPVLAN_MODE_L3:
+ case IPVLAN_MODE_L3S:
return ipvlan_xmit_mode_l3(skb, dev);
}
@@ -664,6 +665,8 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
return ipvlan_handle_mode_l2(pskb, port);
case IPVLAN_MODE_L3:
return ipvlan_handle_mode_l3(pskb, port);
+ case IPVLAN_MODE_L3S:
+ return RX_HANDLER_PASS;
}
/* Should not reach here */
@@ -672,3 +675,94 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
+
+static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ipvl_addr *addr = NULL;
+ struct ipvl_port *port;
+ void *lyr3h;
+ int addr_type;
+
+ if (!dev || !netif_is_ipvlan_port(dev))
+ goto out;
+
+ port = ipvlan_port_get_rcu(dev);
+ if (!port || port->mode != IPVLAN_MODE_L3S)
+ goto out;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ goto out;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
+out:
+ return addr;
+}
+
+struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
+ u16 proto)
+{
+ struct ipvl_addr *addr;
+ struct net_device *sdev;
+
+ addr = ipvlan_skb_to_addr(skb, dev);
+ if (!addr)
+ goto out;
+
+ sdev = addr->master->dev;
+ switch (proto) {
+ case AF_INET:
+ {
+ int err;
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
+ ip4h->tos, sdev);
+ if (unlikely(err))
+ goto out;
+ break;
+ }
+ case AF_INET6:
+ {
+ struct dst_entry *dst;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ int flags = RT6_LOOKUP_F_HAS_SADDR;
+ struct flowi6 fl6 = {
+ .flowi6_iif = sdev->ifindex,
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
+
+ skb_dst_drop(skb);
+ dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
+ skb_dst_set(skb, dst);
+ break;
+ }
+ default:
+ break;
+ }
+
+out:
+ return skb;
+}
+
+unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct ipvl_addr *addr;
+ unsigned int len;
+
+ addr = ipvlan_skb_to_addr(skb, skb->dev);
+ if (!addr)
+ goto out;
+
+ skb->dev = addr->master->dev;
+ len = skb->len + ETH_HLEN;
+ ipvlan_count_rx(addr->master, len, true, false);
+out:
+ return NF_ACCEPT;
+}
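Both new entry points above hinge on mapping a packet's L3 destination to the owning ipvlan slave: the l3mdev receive hook then re-routes the skb against the slave's view, while the LOCAL_IN netfilter hook retargets skb->dev and bumps the slave's rx counters. The lookup shape, reduced to a runnable toy with illustrative types and addresses:

#include <stdio.h>

struct slave { const char *name; unsigned int ip; };

static struct slave slaves[] = { { "ipvl0", 0x0a000001 } };	/* 10.0.0.1 */

static struct slave *addr_lookup(unsigned int daddr)
{
	for (unsigned int i = 0; i < sizeof(slaves) / sizeof(slaves[0]); i++)
		if (slaves[i].ip == daddr)
			return &slaves[i];
	return NULL;
}

static const char *l3s_rcv(unsigned int daddr, const char *indev)
{
	struct slave *s = addr_lookup(daddr);

	return s ? s->name : indev;	/* no match: leave skb->dev alone */
}

int main(void)
{
	puts(l3s_rcv(0x0a000001, "eth0"));	/* -> ipvl0 */
	puts(l3s_rcv(0x0a000002, "eth0"));	/* -> eth0  */
	return 0;
}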
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 18b4e8c7f68a..f442eb366863 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -9,24 +9,87 @@
#include "ipvlan.h"
+static u32 ipvl_nf_hook_refcnt = 0;
+
+static struct nf_hook_ops ipvl_nfops[] __read_mostly = {
+ {
+ .hook = ipvlan_nf_input,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = INT_MAX,
+ },
+ {
+ .hook = ipvlan_nf_input,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = INT_MAX,
+ },
+};
+
+static struct l3mdev_ops ipvl_l3mdev_ops __read_mostly = {
+ .l3mdev_l3_rcv = ipvlan_l3_rcv,
+};
+
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
}
-static void ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
+static int ipvlan_register_nf_hook(void)
+{
+ int err = 0;
+
+ if (!ipvl_nf_hook_refcnt) {
+ err = _nf_register_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops));
+ if (!err)
+ ipvl_nf_hook_refcnt = 1;
+ } else {
+ ipvl_nf_hook_refcnt++;
+ }
+
+ return err;
+}
+
+static void ipvlan_unregister_nf_hook(void)
+{
+ WARN_ON(!ipvl_nf_hook_refcnt);
+
+ ipvl_nf_hook_refcnt--;
+ if (!ipvl_nf_hook_refcnt)
+ _nf_unregister_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops));
+}
+
+static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
{
struct ipvl_dev *ipvlan;
+ struct net_device *mdev = port->dev;
+ int err = 0;
+ ASSERT_RTNL();
if (port->mode != nval) {
+ if (nval == IPVLAN_MODE_L3S) {
+ /* New mode is L3S */
+ err = ipvlan_register_nf_hook();
+ if (!err) {
+ mdev->l3mdev_ops = &ipvl_l3mdev_ops;
+ mdev->priv_flags |= IFF_L3MDEV_MASTER;
+ } else
+ return err;
+ } else if (port->mode == IPVLAN_MODE_L3S) {
+ /* Old mode was L3S */
+ mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ ipvlan_unregister_nf_hook();
+ mdev->l3mdev_ops = NULL;
+ }
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- if (nval == IPVLAN_MODE_L3)
+ if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
ipvlan->dev->flags |= IFF_NOARP;
else
ipvlan->dev->flags &= ~IFF_NOARP;
}
port->mode = nval;
}
+ return err;
}
static int ipvlan_port_create(struct net_device *dev)
@@ -74,6 +137,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
dev->priv_flags &= ~IFF_IPVLAN_MASTER;
+ if (port->mode == IPVLAN_MODE_L3S) {
+ dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ ipvlan_unregister_nf_hook();
+ dev->l3mdev_ops = NULL;
+ }
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
__skb_queue_purge(&port->backlog);
@@ -132,7 +200,8 @@ static int ipvlan_open(struct net_device *dev)
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_addr *addr;
- if (ipvlan->port->mode == IPVLAN_MODE_L3)
+ if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
+ ipvlan->port->mode == IPVLAN_MODE_L3S)
dev->flags |= IFF_NOARP;
else
dev->flags &= ~IFF_NOARP;
@@ -372,13 +441,14 @@ static int ipvlan_nl_changelink(struct net_device *dev,
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
+ int err = 0;
if (data && data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
- ipvlan_set_port_mode(port, nmode);
+ err = ipvlan_set_port_mode(port, nmode);
}
- return 0;
+ return err;
}
static size_t ipvlan_nl_getsize(const struct net_device *dev)
@@ -473,10 +543,13 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
unregister_netdevice(dev);
return err;
}
+ err = ipvlan_set_port_mode(port, mode);
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
- ipvlan_set_port_mode(port, mode);
-
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
}
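Because the ipvl_nfops hooks are system-wide rather than per-port, ipvlan_register_nf_hook() refcounts them: the first L3S port registers the hooks, the last one unregisters, and a registration failure propagates so the mode change or link creation rolls back. The refcount pattern in isolation:

#include <assert.h>

static unsigned int hook_refcnt;
static int hooks_registered;

static int hook_get(void)
{
	if (!hook_refcnt)
		hooks_registered = 1;	/* first user registers */
	hook_refcnt++;
	return 0;			/* registration assumed to succeed */
}

static void hook_put(void)
{
	assert(hook_refcnt);
	if (!--hook_refcnt)
		hooks_registered = 0;	/* last user unregisters */
}

int main(void)
{
	hook_get();
	hook_get();
	hook_put();
	assert(hooks_registered);	/* still one L3S user */
	hook_put();
	assert(!hooks_registered);
	return 0;
}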
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 775674808249..92af182951be 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -424,10 +424,8 @@ static int xgene_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(mdio_bus);
mdiobus_free(mdio_bus);
- if (dev->of_node) {
- if (IS_ERR(pdata->clk))
- clk_disable_unprepare(pdata->clk);
- }
+ if (dev->of_node)
+ clk_disable_unprepare(pdata->clk);
return 0;
}
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 15f820648f82..7c00e508a101 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -55,7 +55,7 @@ static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
-int lan88xx_suspend(struct phy_device *phydev)
+static int lan88xx_suspend(struct phy_device *phydev)
{
struct lan88xx_priv *priv = phydev->priv;
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index c09cc4a3d166..a17573e3bd8a 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -11,6 +11,8 @@
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/of.h>
+#include <dt-bindings/net/mscc-phy-vsc8531.h>
enum rgmii_rx_clock_delay {
RGMII_RX_CLK_DELAY_0_2_NS = 0,
@@ -23,10 +25,24 @@ enum rgmii_rx_clock_delay {
RGMII_RX_CLK_DELAY_3_4_NS = 7
};
+/* Microsemi VSC85xx PHY registers */
+/* IEEE 802. Std Registers */
+#define MSCC_PHY_EXT_PHY_CNTL_1 23
+#define MAC_IF_SELECTION_MASK 0x1800
+#define MAC_IF_SELECTION_GMII 0
+#define MAC_IF_SELECTION_RMII 1
+#define MAC_IF_SELECTION_RGMII 2
+#define MAC_IF_SELECTION_POS 11
+#define FAR_END_LOOPBACK_MODE_MASK 0x0008
+
#define MII_VSC85XX_INT_MASK 25
#define MII_VSC85XX_INT_MASK_MASK 0xa000
#define MII_VSC85XX_INT_STATUS 26
+#define MSCC_PHY_WOL_MAC_CONTROL 27
+#define EDGE_RATE_CNTL_POS 5
+#define EDGE_RATE_CNTL_MASK 0x00E0
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
@@ -40,6 +56,23 @@ enum rgmii_rx_clock_delay {
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8541 0x00070770
+struct edge_rate_table {
+ u16 vddmac;
+ int slowdown[MSCC_SLOWDOWN_MAX];
+};
+
+struct edge_rate_table edge_table[MSCC_VDDMAC_MAX] = {
+ {3300, { 0, -2, -4, -7, -10, -17, -29, -53} },
+ {2500, { 0, -3, -6, -10, -14, -23, -37, -63} },
+ {1800, { 0, -5, -9, -16, -23, -35, -52, -76} },
+ {1500, { 0, -6, -14, -21, -29, -42, -58, -77} },
+};
+
+struct vsc8531_private {
+ u8 edge_slowdown;
+ u16 vddmac;
+};
+
static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
{
int rc;
@@ -48,6 +81,87 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
return rc;
}
+static u8 edge_rate_magic_get(u16 vddmac,
+ int slowdown)
+{
+ int rc = (MSCC_SLOWDOWN_MAX - 1);
+ u8 vdd;
+ u8 sd;
+
+ for (vdd = 0; vdd < MSCC_VDDMAC_MAX; vdd++) {
+ if (edge_table[vdd].vddmac == vddmac) {
+ for (sd = 0; sd < MSCC_SLOWDOWN_MAX; sd++) {
+ if (edge_table[vdd].slowdown[sd] <= slowdown) {
+ rc = (MSCC_SLOWDOWN_MAX - sd - 1);
+ break;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev,
+ u8 edge_rate)
+{
+ int rc;
+ u16 reg_val;
+
+ mutex_lock(&phydev->lock);
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc != 0)
+ goto out_unlock;
+ reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+ reg_val &= ~(EDGE_RATE_CNTL_MASK);
+ reg_val |= (edge_rate << EDGE_RATE_CNTL_POS);
+ rc = phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
+ if (rc != 0)
+ goto out_unlock;
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
+static int vsc85xx_mac_if_set(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ int rc;
+ u16 reg_val;
+
+ mutex_lock(&phydev->lock);
+ reg_val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
+ reg_val &= ~(MAC_IF_SELECTION_MASK);
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ reg_val |= (MAC_IF_SELECTION_RGMII << MAC_IF_SELECTION_POS);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg_val |= (MAC_IF_SELECTION_RMII << MAC_IF_SELECTION_POS);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ reg_val |= (MAC_IF_SELECTION_GMII << MAC_IF_SELECTION_POS);
+ break;
+ default:
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val);
+ if (rc != 0)
+ goto out_unlock;
+
+ rc = genphy_soft_reset(phydev);
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
static int vsc85xx_default_config(struct phy_device *phydev)
{
int rc;
@@ -70,13 +184,60 @@ out_unlock:
return rc;
}
+#ifdef CONFIG_OF_MDIO
+static int vsc8531_of_init(struct phy_device *phydev)
+{
+ int rc;
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+
+ if (!of_node)
+ return -ENODEV;
+
+ rc = of_property_read_u16(of_node, "vsc8531,vddmac",
+ &vsc8531->vddmac);
+ if (rc == -EINVAL)
+ vsc8531->vddmac = MSCC_VDDMAC_3300;
+ rc = of_property_read_u8(of_node, "vsc8531,edge-slowdown",
+ &vsc8531->edge_slowdown);
+ if (rc == -EINVAL)
+ vsc8531->edge_slowdown = 0;
+
+ rc = 0;
+ return rc;
+}
+#else
+static int vsc8531_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
static int vsc85xx_config_init(struct phy_device *phydev)
{
int rc;
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u8 edge_rate;
+
+ rc = vsc8531_of_init(phydev);
+ if (rc)
+ return rc;
rc = vsc85xx_default_config(phydev);
if (rc)
return rc;
+
+ rc = vsc85xx_mac_if_set(phydev, phydev->interface);
+ if (rc)
+ return rc;
+
+ edge_rate = edge_rate_magic_get(vsc8531->vddmac,
+ -(int)vsc8531->edge_slowdown);
+ rc = vsc85xx_edge_rate_cntl_set(phydev, edge_rate);
+ if (rc)
+ return rc;
+
rc = genphy_config_init(phydev);
return rc;
@@ -109,6 +270,19 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
return rc;
}
+static int vsc85xx_probe(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531;
+
+ vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+ if (!vsc8531)
+ return -ENOMEM;
+
+ phydev->priv = vsc8531;
+
+ return 0;
+}
+
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
@@ -126,6 +300,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
},
{
.phy_id = PHY_ID_VSC8541,
@@ -142,6 +317,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
}
};
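edge_rate_magic_get() above scans the table row matching the supply voltage for the first slowdown entry at least as aggressive as requested, then returns the register value as a reversed index. The same logic, trimmed to two rows so it can be compiled and checked directly:

#include <stdio.h>

#define SLOWDOWN_MAX	8
#define VDD_MAX		2

struct edge_rate_table { unsigned short vddmac; int slowdown[SLOWDOWN_MAX]; };

static const struct edge_rate_table edge_table[VDD_MAX] = {
	{ 3300, { 0, -2, -4,  -7, -10, -17, -29, -53 } },
	{ 1800, { 0, -5, -9, -16, -23, -35, -52, -76 } },
};

static unsigned char edge_rate_magic_get(unsigned short vddmac, int slowdown)
{
	int rc = SLOWDOWN_MAX - 1;	/* default when no row matches */

	for (int vdd = 0; vdd < VDD_MAX; vdd++) {
		if (edge_table[vdd].vddmac != vddmac)
			continue;
		for (int sd = 0; sd < SLOWDOWN_MAX; sd++) {
			if (edge_table[vdd].slowdown[sd] <= slowdown) {
				rc = SLOWDOWN_MAX - sd - 1;
				break;
			}
		}
	}
	return rc;
}

int main(void)
{
	/* -10% slowdown at 3300 mV matches index 4, so the magic is 3 */
	printf("%u\n", edge_rate_magic_get(3300, -10));
	return 0;
}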
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 9338f5812b7e..44d439f50961 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "5"
+#define NET_VERSION "6"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -2551,6 +2551,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable)
}
}
+static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
+{
+ ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
+ ocp_reg_write(tp, OCP_EEE_DATA, reg);
+ ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
+}
+
+static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
+{
+ u16 data;
+
+ r8152_mmd_indirect(tp, dev, reg);
+ data = ocp_reg_read(tp, OCP_EEE_DATA);
+ ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+
+ return data;
+}
+
+static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
+{
+ r8152_mmd_indirect(tp, dev, reg);
+ ocp_reg_write(tp, OCP_EEE_DATA, data);
+ ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+}
+
+static void r8152_eee_en(struct r8152 *tp, bool enable)
+{
+ u16 config1, config2, config3;
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
+ config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
+ config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
+
+ if (enable) {
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
+ config1 |= sd_rise_time(1);
+ config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
+ config3 |= fast_snr(42);
+ } else {
+ ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+ config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
+ RX_QUIET_EN);
+ config1 |= sd_rise_time(7);
+ config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
+ config3 |= fast_snr(511);
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
+ ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
+ ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
+}
+
+static void r8152b_enable_eee(struct r8152 *tp)
+{
+ r8152_eee_en(tp, true);
+ r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
+}
+
+static void r8152b_enable_fc(struct r8152 *tp)
+{
+ u16 anar;
+
+ anar = r8152_mdio_read(tp, MII_ADVERTISE);
+ anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ r8152_mdio_write(tp, MII_ADVERTISE, anar);
+}
+
static void rtl8152_disable(struct r8152 *tp)
{
r8152_aldps_en(tp, false);
@@ -2560,13 +2631,9 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
- u16 data;
-
- data = r8152_mdio_read(tp, MII_BMCR);
- if (data & BMCR_PDOWN) {
- data &= ~BMCR_PDOWN;
- r8152_mdio_write(tp, MII_BMCR, data);
- }
+ r8152b_enable_eee(tp);
+ r8152_aldps_en(tp, true);
+ r8152b_enable_fc(tp);
set_bit(PHY_RESET, &tp->flags);
}
@@ -2700,20 +2767,52 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
+static void r8153_aldps_en(struct r8152 *tp, bool enable)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ if (enable) {
+ data |= EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ } else {
+ data &= ~EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ msleep(20);
+ }
+}
+
+static void r8153_eee_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+ u16 config;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ config = ocp_reg_read(tp, OCP_EEE_CFG);
+
+ if (enable) {
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ config |= EEE10_EN;
+ } else {
+ ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+ config &= ~EEE10_EN;
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CFG, config);
+}
+
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
- if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
- tp->version == RTL_VER_05)
- ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+ /* disable ALDPS before updating the PHY parameters */
+ r8153_aldps_en(tp, false);
- data = r8152_mdio_read(tp, MII_BMCR);
- if (data & BMCR_PDOWN) {
- data &= ~BMCR_PDOWN;
- r8152_mdio_write(tp, MII_BMCR, data);
- }
+ /* disable EEE before updating the PHY parameters */
+ r8153_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -2744,6 +2843,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
sram_write(tp, SRAM_10M_AMP1, 0x00af);
sram_write(tp, SRAM_10M_AMP2, 0x0208);
+ r8153_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+
+ r8153_aldps_en(tp, true);
+ r8152b_enable_fc(tp);
+
set_bit(PHY_RESET, &tp->flags);
}
@@ -2865,21 +2970,6 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static void r8153_aldps_en(struct r8152 *tp, bool enable)
-{
- u16 data;
-
- data = ocp_reg_read(tp, OCP_POWER_CFG);
- if (enable) {
- data |= EN_ALDPS;
- ocp_reg_write(tp, OCP_POWER_CFG, data);
- } else {
- data &= ~EN_ALDPS;
- ocp_reg_write(tp, OCP_POWER_CFG, data);
- msleep(20);
- }
-}
-
static void rtl8153_disable(struct r8152 *tp)
{
r8153_aldps_en(tp, false);
@@ -3245,103 +3335,6 @@ static int rtl8152_close(struct net_device *netdev)
return res;
}
-static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
-{
- ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
- ocp_reg_write(tp, OCP_EEE_DATA, reg);
- ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
-}
-
-static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
-{
- u16 data;
-
- r8152_mmd_indirect(tp, dev, reg);
- data = ocp_reg_read(tp, OCP_EEE_DATA);
- ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-
- return data;
-}
-
-static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
-{
- r8152_mmd_indirect(tp, dev, reg);
- ocp_reg_write(tp, OCP_EEE_DATA, data);
- ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-}
-
-static void r8152_eee_en(struct r8152 *tp, bool enable)
-{
- u16 config1, config2, config3;
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
- config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
- config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
-
- if (enable) {
- ocp_data |= EEE_RX_EN | EEE_TX_EN;
- config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
- config1 |= sd_rise_time(1);
- config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
- config3 |= fast_snr(42);
- } else {
- ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
- config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
- RX_QUIET_EN);
- config1 |= sd_rise_time(7);
- config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
- config3 |= fast_snr(511);
- }
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
- ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
- ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
- ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
-}
-
-static void r8152b_enable_eee(struct r8152 *tp)
-{
- r8152_eee_en(tp, true);
- r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
-}
-
-static void r8153_eee_en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
- u16 config;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- config = ocp_reg_read(tp, OCP_EEE_CFG);
-
- if (enable) {
- ocp_data |= EEE_RX_EN | EEE_TX_EN;
- config |= EEE10_EN;
- } else {
- ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
- config &= ~EEE10_EN;
- }
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
- ocp_reg_write(tp, OCP_EEE_CFG, config);
-}
-
-static void r8153_enable_eee(struct r8152 *tp)
-{
- r8153_eee_en(tp, true);
- ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
-}
-
-static void r8152b_enable_fc(struct r8152 *tp)
-{
- u16 anar;
-
- anar = r8152_mdio_read(tp, MII_ADVERTISE);
- anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- r8152_mdio_write(tp, MII_ADVERTISE, anar);
-}
-
static void rtl_tally_reset(struct r8152 *tp)
{
u32 ocp_data;
@@ -3354,10 +3347,17 @@ static void rtl_tally_reset(struct r8152 *tp)
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
+ u16 data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
r8152_aldps_en(tp, false);
if (tp->version == RTL_VER_01) {
@@ -3379,9 +3379,6 @@ static void r8152b_init(struct r8152 *tp)
SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
- r8152b_enable_eee(tp);
- r8152_aldps_en(tp, true);
- r8152b_enable_fc(tp);
rtl_tally_reset(tp);
/* enable rx aggregation */
@@ -3393,12 +3390,12 @@ static void r8152b_init(struct r8152 *tp)
static void r8153_init(struct r8152 *tp)
{
u32 ocp_data;
+ u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
- r8153_aldps_en(tp, false);
r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
@@ -3415,6 +3412,23 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
+ if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+ tp->version == RTL_VER_05)
+ ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ for (i = 0; i < 500; i++) {
+ ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+ if (ocp_data == PHY_STAT_LAN_ON)
+ break;
+ msleep(20);
+ }
+
usb_disable_lpm(tp->udev);
r8153_u2p3en(tp, false);
@@ -3482,9 +3496,6 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
- r8153_enable_eee(tp);
- r8153_aldps_en(tp, true);
- r8152b_enable_fc(tp);
rtl_tally_reset(tp);
r8153_u2p3en(tp, true);
}
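The relocated r8152_mmd_* helpers reach MMD registers through a three-step indirection: write OCP_EEE_AR with the device in function-address mode, latch the register number through OCP_EEE_DATA, then flip OCP_EEE_AR to function-data mode so the next data access carries the payload. A toy model of that sequencing; the FUN_ADDR/FUN_DATA bit values here are stand-ins, not the driver's:

#include <stdio.h>
#include <stdint.h>

#define FUN_ADDR 0x0000	/* illustrative */
#define FUN_DATA 0x4000	/* illustrative */

static uint16_t ar_reg;			/* models OCP_EEE_AR */
static uint16_t mmd_regs[32][32];	/* device x register, trimmed */
static uint16_t cur_dev, cur_reg;

/* which == 0 writes the AR register, which == 1 the data window */
static void ocp_write(int which, uint16_t v)
{
	if (which == 0) {
		ar_reg = v;
		cur_dev = v & 0x1f;		/* device address bits */
	} else if (ar_reg & FUN_DATA) {
		mmd_regs[cur_dev][cur_reg] = v;	/* data mode: payload */
	} else {
		cur_reg = v & 0x1f;		/* address mode: latch reg */
	}
}

static uint16_t mmd_read(uint16_t dev, uint16_t reg)
{
	ocp_write(0, FUN_ADDR | dev);	/* step 1: device, address mode */
	ocp_write(1, reg);		/* step 2: latch register number */
	ocp_write(0, FUN_DATA | dev);	/* step 3: switch to data mode */
	return mmd_regs[cur_dev][cur_reg];
}

int main(void)
{
	mmd_regs[7][12] = 0xabcd;
	printf("0x%x\n", mmd_read(7, 12));	/* -> 0xabcd */
	return 0;
}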
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 2fd93b4c759a..b5554f2ebee4 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3186,7 +3186,6 @@ vmxnet3_tx_timeout(struct net_device *netdev)
netdev_err(adapter->netdev, "tx hang\n");
schedule_work(&adapter->work);
- netif_wake_queue(adapter->netdev);
}
@@ -3213,6 +3212,7 @@ vmxnet3_reset_work(struct work_struct *data)
}
rtnl_unlock();
+ netif_wake_queue(adapter->netdev);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index b99ad5df383d..766c63bf05c4 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -91,59 +91,37 @@ static int ath10k_ahb_clock_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
- int ret;
dev = &ar_ahb->pdev->dev;
- ar_ahb->cmd_clk = clk_get(dev, "wifi_wcss_cmd");
+ ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd");
if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) {
ath10k_err(ar, "failed to get cmd clk: %ld\n",
PTR_ERR(ar_ahb->cmd_clk));
- ret = ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
- goto out;
+ return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
}
- ar_ahb->ref_clk = clk_get(dev, "wifi_wcss_ref");
+ ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref");
if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) {
ath10k_err(ar, "failed to get ref clk: %ld\n",
PTR_ERR(ar_ahb->ref_clk));
- ret = ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
- goto err_cmd_clk_put;
+ return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
}
- ar_ahb->rtc_clk = clk_get(dev, "wifi_wcss_rtc");
+ ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc");
if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
ath10k_err(ar, "failed to get rtc clk: %ld\n",
PTR_ERR(ar_ahb->rtc_clk));
- ret = ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
- goto err_ref_clk_put;
+ return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
}
return 0;
-
-err_ref_clk_put:
- clk_put(ar_ahb->ref_clk);
-
-err_cmd_clk_put:
- clk_put(ar_ahb->cmd_clk);
-
-out:
- return ret;
}
static void ath10k_ahb_clock_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk))
- clk_put(ar_ahb->cmd_clk);
-
- if (!IS_ERR_OR_NULL(ar_ahb->ref_clk))
- clk_put(ar_ahb->ref_clk);
-
- if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk))
- clk_put(ar_ahb->rtc_clk);
-
ar_ahb->cmd_clk = NULL;
ar_ahb->ref_clk = NULL;
ar_ahb->rtc_clk = NULL;
@@ -213,92 +191,51 @@ static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
- int ret;
dev = &ar_ahb->pdev->dev;
- ar_ahb->core_cold_rst = reset_control_get(dev, "wifi_core_cold");
- if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst)) {
+ ar_ahb->core_cold_rst = devm_reset_control_get(dev, "wifi_core_cold");
+ if (IS_ERR(ar_ahb->core_cold_rst)) {
ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->core_cold_rst));
- ret = ar_ahb->core_cold_rst ?
- PTR_ERR(ar_ahb->core_cold_rst) : -ENODEV;
- goto out;
+ return PTR_ERR(ar_ahb->core_cold_rst);
}
- ar_ahb->radio_cold_rst = reset_control_get(dev, "wifi_radio_cold");
- if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst)) {
+ ar_ahb->radio_cold_rst = devm_reset_control_get(dev, "wifi_radio_cold");
+ if (IS_ERR(ar_ahb->radio_cold_rst)) {
ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_cold_rst));
- ret = ar_ahb->radio_cold_rst ?
- PTR_ERR(ar_ahb->radio_cold_rst) : -ENODEV;
- goto err_core_cold_rst_put;
+ return PTR_ERR(ar_ahb->radio_cold_rst);
}
- ar_ahb->radio_warm_rst = reset_control_get(dev, "wifi_radio_warm");
- if (IS_ERR_OR_NULL(ar_ahb->radio_warm_rst)) {
+ ar_ahb->radio_warm_rst = devm_reset_control_get(dev, "wifi_radio_warm");
+ if (IS_ERR(ar_ahb->radio_warm_rst)) {
ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_warm_rst));
- ret = ar_ahb->radio_warm_rst ?
- PTR_ERR(ar_ahb->radio_warm_rst) : -ENODEV;
- goto err_radio_cold_rst_put;
+ return PTR_ERR(ar_ahb->radio_warm_rst);
}
- ar_ahb->radio_srif_rst = reset_control_get(dev, "wifi_radio_srif");
- if (IS_ERR_OR_NULL(ar_ahb->radio_srif_rst)) {
+ ar_ahb->radio_srif_rst = devm_reset_control_get(dev, "wifi_radio_srif");
+ if (IS_ERR(ar_ahb->radio_srif_rst)) {
ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_srif_rst));
- ret = ar_ahb->radio_srif_rst ?
- PTR_ERR(ar_ahb->radio_srif_rst) : -ENODEV;
- goto err_radio_warm_rst_put;
+ return PTR_ERR(ar_ahb->radio_srif_rst);
}
- ar_ahb->cpu_init_rst = reset_control_get(dev, "wifi_cpu_init");
- if (IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ar_ahb->cpu_init_rst = devm_reset_control_get(dev, "wifi_cpu_init");
+ if (IS_ERR(ar_ahb->cpu_init_rst)) {
ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
PTR_ERR(ar_ahb->cpu_init_rst));
- ret = ar_ahb->cpu_init_rst ?
- PTR_ERR(ar_ahb->cpu_init_rst) : -ENODEV;
- goto err_radio_srif_rst_put;
+ return PTR_ERR(ar_ahb->cpu_init_rst);
}
return 0;
-
-err_radio_srif_rst_put:
- reset_control_put(ar_ahb->radio_srif_rst);
-
-err_radio_warm_rst_put:
- reset_control_put(ar_ahb->radio_warm_rst);
-
-err_radio_cold_rst_put:
- reset_control_put(ar_ahb->radio_cold_rst);
-
-err_core_cold_rst_put:
- reset_control_put(ar_ahb->core_cold_rst);
-
-out:
- return ret;
}
static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
- if (!IS_ERR_OR_NULL(ar_ahb->core_cold_rst))
- reset_control_put(ar_ahb->core_cold_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_cold_rst))
- reset_control_put(ar_ahb->radio_cold_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_warm_rst))
- reset_control_put(ar_ahb->radio_warm_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->radio_srif_rst))
- reset_control_put(ar_ahb->radio_srif_rst);
-
- if (!IS_ERR_OR_NULL(ar_ahb->cpu_init_rst))
- reset_control_put(ar_ahb->cpu_init_rst);
-
ar_ahb->core_cold_rst = NULL;
ar_ahb->radio_cold_rst = NULL;
ar_ahb->radio_warm_rst = NULL;
@@ -572,6 +509,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
if (ar_ahb->irq < 0) {
ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
+ ret = ar_ahb->irq;
goto err_clock_deinit;
}
@@ -850,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
+ ret = -ENODEV;
goto err_halt_device;
}
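The devm_clk_get()/devm_reset_control_get() conversion above works because managed acquisitions are recorded against the device and released automatically on probe failure or removal, which is why every goto-based put chain collapses into plain returns. The idea behind devres, modelled in userspace:

#include <stdio.h>
#include <stdlib.h>

struct devres {
	void (*release)(void *data);
	void *data;
	struct devres *next;
};

static struct devres *devres_list;	/* one list per device, really */

static void *devm_track(void *data, void (*release)(void *))
{
	struct devres *dr = malloc(sizeof(*dr));

	dr->release = release;
	dr->data = data;
	dr->next = devres_list;
	devres_list = dr;
	return data;
}

static void devres_release_all(void)
{
	while (devres_list) {
		struct devres *dr = devres_list;

		devres_list = dr->next;
		dr->release(dr->data);	/* LIFO, like the kernel's devres */
		free(dr);
	}
}

static void clk_release(void *d) { printf("put %s\n", (char *)d); }

int main(void)
{
	devm_track("wifi_wcss_cmd", clk_release);
	devm_track("wifi_wcss_ref", clk_release);
	/* a probe failure at this point would simply return... */
	devres_release_all();	/* ...and the core would release both */
	return 0;
}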
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 65d8d714e917..0b4d79659884 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -39,7 +39,7 @@
* chooses what to send (buffer address, length). The destination
* side keeps a supply of "anonymous receive buffers" available and
* it handles incoming data as it arrives (when the destination
- * recieves an interrupt).
+ * receives an interrupt).
*
* The sender may send a simple buffer (address/length) or it may
* send a small list of buffers. When a small list is sent, hardware
@@ -433,6 +433,13 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index = dest_ring->write_index;
u32 ctrl_addr = pipe->ctrl_addr;
+ u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+
+ /* Prevent the CE ring from getting stuck when the ring is full.
+ * Make sure the write index stays one entry behind the read index.
+ */
+ if ((cur_write_idx + nentries) == dest_ring->sw_index)
+ nentries -= 1;
write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
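The clamp above preserves the invariant that lets a ring with a shared index pair distinguish full from empty: if the write index is ever allowed to land exactly on the read index, a completely full ring looks identical to an empty one. The arithmetic, with masking standing in for CE_RING_IDX_ADD():

#include <stdio.h>

#define NENTRIES 8			/* power of two, as in CE rings */
#define MASK (NENTRIES - 1)

int main(void)
{
	unsigned int sw_index = 5;	/* reader position */
	unsigned int write_index = 2;	/* writer position */
	unsigned int nentries = 3;	/* would land exactly on the reader */

	if (((write_index + nentries) & MASK) == sw_index)
		nentries -= 1;		/* stay one entry behind */

	write_index = (write_index + nentries) & MASK;
	printf("write=%u read=%u\n", write_index, sw_index);	/* 4 and 5 */
	return 0;
}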
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 3a8984ba9f74..21ae8d663e67 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -68,6 +68,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -87,6 +88,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -105,6 +107,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -123,6 +126,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -141,6 +145,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -160,6 +165,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -184,6 +190,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -208,6 +215,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -231,6 +239,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -249,6 +258,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -267,6 +277,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -292,6 +303,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
};
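The new decap_align_bytes field turns the rx-descriptor alignment into a per-chip datum instead of a hard-coded 4: the QCA988x-family entries use 4, the QCA99X0-family entries use 1. A hypothetical userspace sketch of the same table-driven lookup (the ids and struct name are illustrative, not the driver's real constants):

#include <stdio.h>
#include <stddef.h>

struct hw_params {
	unsigned int id;		/* chip/revision identifier */
	int decap_align_bytes;		/* rx_hdr_status alignment */
};

/* Table-driven per-hardware quirks, mirroring ath10k_hw_params_list. */
static const struct hw_params params_list[] = {
	{ .id = 0x1, .decap_align_bytes = 4 },	/* QCA988x-style (id invented) */
	{ .id = 0x2, .decap_align_bytes = 1 },	/* QCA99X0-style (id invented) */
};

static const struct hw_params *hw_params_lookup(unsigned int id)
{
	for (size_t i = 0; i < sizeof(params_list) / sizeof(params_list[0]); i++)
		if (params_list[i].id == id)
			return &params_list[i];
	return NULL;	/* unknown chip */
}

int main(void)
{
	const struct hw_params *hw = hw_params_lookup(0x2);

	if (hw)
		printf("decap alignment: %d byte(s)\n", hw->decap_align_bytes);
	return 0;
}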
@@ -1960,7 +1972,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
- ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+ if (ar->max_num_vdevs >= 64)
+ ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL;
+ else
+ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
INIT_LIST_HEAD(&ar->arvifs);
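Shifting a 64-bit 1 by 64 positions is undefined behavior in C, which is why the patch special-cases max_num_vdevs >= 64 instead of evaluating (1LL << 64) - 1. A standalone sketch of the guard, assuming a plain uint64_t map:

#include <stdio.h>
#include <stdint.h>

/* Build an n-bit mask without shifting by the full type width,
 * which is undefined behavior for a 64-bit type. */
static uint64_t free_vdev_map(unsigned int max_num_vdevs)
{
	if (max_num_vdevs >= 64)
		return ~0ULL;			/* all 64 vdev slots free */
	return (1ULL << max_num_vdevs) - 1;	/* low max_num_vdevs bits set */
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)free_vdev_map(16));	/* ...ffff */
	printf("%016llx\n", (unsigned long long)free_vdev_map(64));	/* all f */
	return 0;
}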
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 6ec9495bcc04..dda49af1eb74 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -201,10 +201,10 @@ struct ath10k_fw_stats_pdev {
/* PDEV stats */
s32 ch_noise_floor;
- u32 tx_frame_count;
- u32 rx_frame_count;
- u32 rx_clear_count;
- u32 cycle_count;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
u32 phy_err_count;
u32 chan_tx_power;
u32 ack_rx_bad;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 98c14247021b..0d2ed09f202b 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -595,7 +595,7 @@ enum htt_rx_mpdu_status {
/* only accept EAPOL frames */
HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
- /* Non-data in promiscous mode */
+ /* Non-data in promiscuous mode */
HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
@@ -900,7 +900,7 @@ struct htt_rx_in_ord_ind {
* Purpose: indicate how many 32-bit integers follow the message header
* - NUM_CHARS
* Bits 31:16
- * Purpose: indicate how many 8-bit charaters follow the series of integers
+ * Purpose: indicate how many 8-bit characters follow the series of integers
*/
struct htt_rx_test {
u8 num_ints;
@@ -1042,10 +1042,10 @@ struct htt_dbg_stats_wal_tx_stats {
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_tx_timeout;
/* wal pdev resets */
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index a3785a9aa843..0b4c1562420f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1103,6 +1103,7 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
@@ -1119,8 +1120,8 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- rfc1042 += round_up(hdr_len, 4) +
- round_up(crypto_len, 4);
+ rfc1042 += round_up(hdr_len, bytes_aligned) +
+ round_up(crypto_len, bytes_aligned);
}
if (is_amsdu)
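The RFC 1042 header sits past the 802.11 header and the crypto parameters, each padded out to the chip's decap alignment. A sketch of the offset arithmetic with the usual power-of-two round-up (the header and IV lengths are example values):

#include <stdio.h>
#include <stddef.h>

/* Kernel-style round_up for power-of-two alignments. */
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t hdr_len = 26;	/* e.g. a QoS data header (example value) */
	size_t crypto_len = 8;	/* e.g. a CCMP IV (example value) */
	size_t align = 4;	/* decap_align_bytes on the QCA988x family */

	size_t offset = ROUND_UP(hdr_len, align) + ROUND_UP(crypto_len, align);

	printf("rfc1042 offset: %zu bytes\n", offset);	/* 28 + 8 = 36 */
	return 0;
}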
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index c2ecb9bd824a..675e75d66db2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -85,7 +85,7 @@ const struct ath10k_hw_regs qca99x0_regs = {
.ce7_base_address = 0x0004bc00,
/* Note: qca99x0 supports upto 12 Copy Engines. Other than address of
* CE0 and CE1 no other copy engine is directly referred in the code.
- * It is not really neccessary to assign address for newly supported
+ * It is not really necessary to assign address for newly supported
* CEs in this address table.
* Copy Engine Address
* CE8 0x0004c000
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 308e423d4b99..6038b7486f1d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -284,7 +284,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
-/* Known pecularities:
+/* Known peculiarities:
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
@@ -408,6 +408,9 @@ struct ath10k_hw_params {
bool sw_decrypt_mcast_mgmt;
const struct ath10k_hw_ops *hw_ops;
+
+ /* Number of bytes used for alignment in rx_hdr_status of rx desc. */
+ int decap_align_bytes;
};
struct htt_rx_desc;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0a44dab5a287..76297d69f1ed 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2793,7 +2793,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
- ath10k_warn(ar, "faield to down vdev %i: %d\n",
+ ath10k_warn(ar, "failed to down vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->def_wep_key_idx = -1;
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index aaf53a81e78b..a47cab44d9c8 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -405,7 +405,7 @@ Fw Mode/SubMode Mask
* 1. target firmware would check magic number and if it's a match, firmware
* would consider the bits[0:15] are valid and base on that to calculate
* the end of DRAM. Early allocation would be located at that area and
- * may be reclaimed when necesary
+ * may be reclaimed when necessary
* 2. if no magic number is found, early allocation would happen at "_end"
* symbol of ROM which is located before the app-data and might NOT be
* re-claimable. If this is adopted, link script should keep this in
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 38993d72f5e6..54df425bb0fc 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3514,6 +3514,12 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
continue;
}
+ /* mac80211 would have already asked us to stop beaconing and
+ * bring the vdev down, so continue in that case
+ */
+ if (!arvif->is_up)
+ continue;
+
/* There are no completions for beacons so wait for next SWBA
* before telling mac80211 to decrement CSA counter
*
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 48e04b92e231..1b243c899bef 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -55,7 +55,7 @@
* type.
*
* 6. Comment each parameter part of the WMI command/event structure by
- * using the 2 stars at the begining of C comment instead of one star to
+ * using the 2 stars at the beginning of C comment instead of one star to
* enable HTML document generation using Doxygen.
*
*/
@@ -2087,7 +2087,7 @@ struct wmi_resource_config {
* In offload mode target supports features like WOW, chatter and
* other protocol offloads. In order to support them some
* functionalities like reorder buffering, PN checking need to be
- * done in target. This determines maximum number of peers suported
+ * done in target. This determines maximum number of peers supported
* by target in offload mode
*/
__le32 num_offload_peers;
@@ -2268,7 +2268,7 @@ struct wmi_resource_config {
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
- * and is overriden by the OS shim as required.
+ * and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
@@ -2450,7 +2450,7 @@ struct wmi_resource_config_10x {
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
- * and is overriden by the OS shim as required.
+ * and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
@@ -2744,7 +2744,7 @@ struct wmi_init_cmd {
struct wmi_host_mem_chunks mem_chunks;
} __packed;
-/* _10x stucture is from 10.X FW API */
+/* _10x structure is from 10.X FW API */
struct wmi_init_cmd_10x {
struct wmi_resource_config_10x resource_config;
struct wmi_host_mem_chunks mem_chunks;
@@ -3967,7 +3967,7 @@ struct wmi_pdev_stats_tx {
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
/* wal pdev continous xretry */
@@ -4222,10 +4222,10 @@ struct wmi_10_2_stats_event {
*/
struct wmi_pdev_stats_base {
__le32 chan_nf;
- __le32 tx_frame_count;
- __le32 rx_frame_count;
- __le32 rx_clear_count;
- __le32 cycle_count;
+ __le32 tx_frame_count; /* Cycles spent transmitting frames */
+ __le32 rx_frame_count; /* Cycles spent receiving frames */
+ __le32 rx_clear_count; /* Total channel busy time, evidently */
+ __le32 cycle_count; /* Total on-channel time */
__le32 phy_err_count;
__le32 chan_tx_pwr;
} __packed;
@@ -4461,9 +4461,9 @@ struct wmi_vdev_start_request_cmd {
__le32 flags;
/* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
struct wmi_ssid ssid;
- /* beacon/probe reponse xmit rate. Applicable for SoftAP. */
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
__le32 bcn_tx_rate;
- /* beacon/probe reponse xmit power. Applicable for SoftAP. */
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
__le32 bcn_tx_power;
/* number of p2p NOA descriptor(s) from scan entry */
__le32 num_noa_descriptors;
@@ -4691,7 +4691,7 @@ enum wmi_vdev_param {
WMI_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_VDEV_PARAM_LISTEN_INTERVAL,
- /* muticast rate in Mbps */
+ /* multicast rate in Mbps */
WMI_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_VDEV_PARAM_MGMT_TX_RATE,
@@ -4822,7 +4822,7 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
- /* muticast rate in Mbps */
+ /* multicast rate in Mbps */
WMI_10X_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
@@ -5067,7 +5067,7 @@ struct wmi_vdev_simple_event {
} __packed;
/* VDEV start response status codes */
-/* VDEV succesfully started */
+/* VDEV successfully started */
#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
/* requested VDEV not found */
@@ -5383,7 +5383,7 @@ enum wmi_sta_ps_param_pspoll_count {
#define WMI_UAPSD_AC_TYPE_TRIG 1
#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
- ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
+ (type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))
enum wmi_sta_ps_param_uapsd {
WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
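Each access class owns two adjacent bits, delivery-enable at bit ac*2 and trigger-enable at bit ac*2+1, which is all the reworked macro computes. A standalone expansion printing the masks for all four ACs:

#include <stdio.h>

#define WMI_UAPSD_AC_TYPE_DELI 0
#define WMI_UAPSD_AC_TYPE_TRIG 1

/* Delivery-enable is the even bit of each AC's pair, trigger the odd one. */
#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
	((type) == WMI_UAPSD_AC_TYPE_DELI ? 1 << ((ac) << 1) : 1 << (((ac) << 1) + 1))

int main(void)
{
	for (int ac = 0; ac < 4; ac++)
		printf("AC%d: delivery=0x%02x trigger=0x%02x\n", ac,
		       WMI_UAPSD_AC_BIT_MASK(ac, WMI_UAPSD_AC_TYPE_DELI),
		       WMI_UAPSD_AC_BIT_MASK(ac, WMI_UAPSD_AC_TYPE_TRIG));
	return 0;
}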
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index b8cf04d11975..3fd1cc98fd2f 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -3520,7 +3520,7 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
NO_SYNC_WMIFLAG);
- return 0;
+ return ret;
}
int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index f68cb00450e0..8f231c67dd51 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -180,7 +180,7 @@ config ATH9K_HTC_DEBUGFS
config ATH9K_HWRNG
bool "Random number generator support"
depends on ATH9K && (HW_RANDOM = y || HW_RANDOM = ATH9K)
- default y
+ default n
---help---
This option incorporates the ADC register output as a source of
randomness into Linux entropy pool (/dev/urandom and /dev/random)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index d1bc51f92686..038a960c5104 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -194,7 +194,7 @@ retry:
}
/* Check info buffer */
- info = (void *)&msg[1];
+ info = (void *)&bcdc->buf[0];
/* Copy info buffer */
if (buf) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 03404cbe9237..72139b579b18 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -420,7 +420,7 @@ u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
- u32 data;
+ u32 data = 0;
int retval;
brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 748eaa68cf07..b777e1b2f87a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1595,15 +1595,9 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
val = 1;
brcmf_dbg(CONN, "shared key\n");
break;
- case NL80211_AUTHTYPE_AUTOMATIC:
- val = 2;
- brcmf_dbg(CONN, "automatic\n");
- break;
- case NL80211_AUTHTYPE_NETWORK_EAP:
- brcmf_dbg(CONN, "network eap\n");
default:
val = 2;
- brcmf_err("invalid auth type (%d)\n", sme->auth_type);
+ brcmf_dbg(CONN, "automatic, auth type (%d)\n", sme->auth_type);
break;
}
@@ -2533,7 +2527,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
WL_BSS_INFO_MAX);
if (err) {
brcmf_err("Failed to get bss info (%d)\n", err);
- return;
+ goto out_kfree;
}
si->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period);
@@ -2545,6 +2539,9 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+
+out_kfree:
+ kfree(buf);
}
static s32
@@ -3703,6 +3700,7 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
struct cfg80211_wowlan *wowl)
{
u32 wowl_config;
+ struct brcmf_wowl_wakeind_le wowl_wakeind;
u32 i;
brcmf_dbg(TRACE, "Suspend, wowl config.\n");
@@ -3744,8 +3742,9 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
wowl_config |= BRCMF_WOWL_UNASSOC;
- brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear",
- sizeof(struct brcmf_wowl_wakeind_le));
+ memcpy(&wowl_wakeind, "clear", 6);
+ brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", &wowl_wakeind,
+ sizeof(wowl_wakeind));
brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
brcmf_bus_wowl_config(cfg->pub->bus_if, true);
@@ -4502,6 +4501,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
bool mbss;
int is_11d;
+ bool supports_11d;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
@@ -4514,11 +4514,16 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
mbss = ifp->vif->mbss;
/* store current 11d setting */
- brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, &ifp->vif->is_11d);
- country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
- settings->beacon.tail_len,
- WLAN_EID_COUNTRY);
- is_11d = country_ie ? 1 : 0;
+ if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
+ &ifp->vif->is_11d)) {
+ supports_11d = false;
+ } else {
+ country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
+ settings->beacon.tail_len,
+ WLAN_EID_COUNTRY);
+ is_11d = country_ie ? 1 : 0;
+ supports_11d = true;
+ }
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
@@ -4577,7 +4582,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
/* Parameters shared by all radio interfaces */
if (!mbss) {
- if (is_11d != ifp->vif->is_11d) {
+ if ((supports_11d) && (is_11d != ifp->vif->is_11d)) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
is_11d);
if (err < 0) {
@@ -4619,7 +4624,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
- } else if (WARN_ON(is_11d != ifp->vif->is_11d)) {
+ } else if (WARN_ON(supports_11d && (is_11d != ifp->vif->is_11d))) {
/* Multiple-BSS should use same 11d configuration */
err = -EINVAL;
goto exit;
@@ -4753,11 +4758,8 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
brcmf_err("setting INFRA mode failed %d\n", err);
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
brcmf_fil_iovar_int_set(ifp, "mbss", 0);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
- ifp->vif->is_11d);
- if (err < 0)
- brcmf_err("restoring REGULATORY setting failed %d\n",
- err);
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
+ ifp->vif->is_11d);
/* Bring device back up so it can be used again */
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 65e8c8766441..5eaac13e2317 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -136,27 +136,6 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
err);
}
-static void
-_brcmf_set_mac_address(struct work_struct *work)
-{
- struct brcmf_if *ifp;
- s32 err;
-
- ifp = container_of(work, struct brcmf_if, setmacaddr_work);
-
- brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
-
- err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
- ETH_ALEN);
- if (err < 0) {
- brcmf_err("Setting cur_etheraddr failed, %d\n", err);
- } else {
- brcmf_dbg(TRACE, "MAC address updated to %pM\n",
- ifp->mac_addr);
- memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- }
-}
-
#if IS_ENABLED(CONFIG_IPV6)
static void _brcmf_update_ndtable(struct work_struct *work)
{
@@ -190,10 +169,20 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct sockaddr *sa = (struct sockaddr *)addr;
+ int err;
- memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
- schedule_work(&ifp->setmacaddr_work);
- return 0;
+ brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
+
+ err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data,
+ ETH_ALEN);
+ if (err < 0) {
+ brcmf_err("Setting cur_etheraddr failed, %d\n", err);
+ } else {
+ brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
+ memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
+ memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+ }
+ return err;
}
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
@@ -519,13 +508,9 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
ndev->needed_headroom += drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
- drvr->rxsz = ndev->mtu + ndev->hard_header_len +
- drvr->hdrlen;
-
/* set the mac address */
memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable);
@@ -730,7 +715,6 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
}
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
- cancel_work_sync(&ifp->setmacaddr_work);
cancel_work_sync(&ifp->multicast_work);
cancel_work_sync(&ifp->ndoffload_work);
}
@@ -886,9 +870,12 @@ static int brcmf_inet6addr_changed(struct notifier_block *nb,
}
break;
case NETDEV_DOWN:
- if (i < NDOL_MAX_ENTRIES)
- for (; i < ifp->ipv6addr_idx; i++)
+ if (i < NDOL_MAX_ENTRIES) {
+ for (; i < ifp->ipv6addr_idx - 1; i++)
table[i] = table[i + 1];
+ memset(&table[i], 0, sizeof(table[i]));
+ ifp->ipv6addr_idx--;
+ }
break;
default:
break;
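The NETDEV_DOWN fix above compacts the offload table properly: entries after the removed address shift down, the stale last slot is cleared, and the index shrinks (the old loop read one element past the used range and never decremented the count). A userspace sketch of the corrected compaction, using ints in place of IPv6 addresses:

#include <stdio.h>
#include <string.h>

#define NDOL_MAX_ENTRIES 8

int main(void)
{
	int table[NDOL_MAX_ENTRIES] = { 10, 20, 30, 40 };
	int idx = 4;	/* number of valid entries */
	int i = 1;	/* slot of the address going away */

	if (i < NDOL_MAX_ENTRIES) {
		/* Shift the tail down over the removed slot... */
		for (; i < idx - 1; i++)
			table[i] = table[i + 1];
		/* ...then clear the stale last slot and shrink the count. */
		memset(&table[i], 0, sizeof(table[i]));
		idx--;
	}

	for (int j = 0; j < NDOL_MAX_ENTRIES; j++)
		printf("%d ", table[j]);	/* 10 30 40 0 0 0 0 0 */
	printf("(count=%d)\n", idx);
	return 0;
}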
@@ -1061,8 +1048,7 @@ fail:
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
- if (ifp)
- brcmf_net_detach(ifp->ndev, false);
+ brcmf_net_detach(ifp->ndev, false);
if (p2p_ifp)
brcmf_net_detach(p2p_ifp->ndev, false);
drvr->iflist[0] = NULL;
@@ -1169,7 +1155,8 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
!brcmf_get_pend_8021x_cnt(ifp),
MAX_WAIT_FOR_8021X_TX);
- WARN_ON(!err);
+ if (!err)
+ brcmf_err("Timed out waiting for no pending 802.1x packets\n");
return !err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 8fa34cad5a96..c94dcab260d0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -112,15 +112,11 @@ struct brcmf_pub {
/* Internal brcmf items */
uint hdrlen; /* Total BRCMF header length (proto + bus) */
- uint rxsz; /* Rx buffer size bus module should use */
/* Dongle media info */
char fwver[BRCMF_DRIVER_FIRMWARE_VERSION_LEN];
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- /* Multicast data packets sent to dongle */
- unsigned long tx_multicast;
-
struct mac_address addresses[BRCMF_MAX_IFS];
struct brcmf_if *iflist[BRCMF_MAX_IFS];
@@ -176,7 +172,6 @@ enum brcmf_netif_stop_reason {
* @vif: points to cfg80211 specific interface information.
* @ndev: associated network device.
* @stats: interface specific network statistics.
- * @setmacaddr_work: worker object for setting mac address.
* @multicast_work: worker object for multicast provisioning.
* @ndoffload_work: worker object for neighbor discovery offload configuration.
* @fws_desc: interface specific firmware-signalling descriptor.
@@ -193,7 +188,6 @@ struct brcmf_if {
struct brcmf_cfg80211_vif *vif;
struct net_device *ndev;
struct net_device_stats stats;
- struct work_struct setmacaddr_work;
struct work_struct multicast_work;
struct work_struct ndoffload_work;
struct brcmf_fws_mac_descriptor *fws_desc;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 7e269f9aa607..d0b738da2458 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -234,13 +234,20 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
+ struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
struct brcmf_flowring_ring *ring;
+ struct brcmf_if *ifp;
u16 hash_idx;
+ u8 ifidx;
struct sk_buff *skb;
ring = flow->rings[flowid];
if (!ring)
return;
+
+ ifidx = brcmf_flowring_ifidx_get(flow, flowid);
+ ifp = brcmf_get_ifp(bus_if->drvr, ifidx);
+
brcmf_flowring_block(flow, flowid, false);
hash_idx = ring->hash_id;
flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
@@ -249,7 +256,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
skb = skb_dequeue(&ring->skblist);
while (skb) {
- brcmu_pkt_buf_free_skb(skb);
+ brcmf_txfinalize(ifp, skb, false);
skb = skb_dequeue(&ring->skblist);
}
@@ -495,14 +502,18 @@ void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
} else {
search = flow->tdls_entry;
if (memcmp(search->mac, peer, ETH_ALEN) == 0)
- return;
+ goto free_entry;
while (search->next) {
search = search->next;
if (memcmp(search->mac, peer, ETH_ALEN) == 0)
- return;
+ goto free_entry;
}
search->next = tdls_entry;
}
flow->tdls_active = true;
+ return;
+
+free_entry:
+ kfree(tdls_entry);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 9f9024a7bd64..a190f535efc9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2104,8 +2104,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
if ((skb->priority == 0) || (skb->priority > 7))
skb->priority = cfg80211_classify8021d(skb, NULL);
- drvr->tx_multicast += !!multicast;
-
if (fws->avoid_queueing) {
rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
if (rc < 0)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index b662cf35b033..c7509c51e9d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -46,15 +46,6 @@
*
******************************************************************************/
-static inline const struct fw_img *
-iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
-{
- if (ucode_type >= IWL_UCODE_TYPE_MAX)
- return NULL;
-
- return &priv->fw->img[ucode_type];
-}
-
/*
* Calibration
*/
@@ -330,7 +321,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type old_type;
static const u16 alive_cmd[] = { REPLY_ALIVE };
- fw = iwl_get_ucode_image(priv, ucode_type);
+ fw = iwl_get_ucode_image(priv->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 64690c14ff4d..d4b73dedf89b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,13 +73,13 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
-#define IWL7265D_UCODE_API_MAX 24
-#define IWL3168_UCODE_API_MAX 24
+#define IWL7265D_UCODE_API_MAX 26
+#define IWL3168_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 16
-#define IWL7265_UCODE_API_MIN 16
-#define IWL7265D_UCODE_API_MIN 16
+#define IWL7260_UCODE_API_MIN 17
+#define IWL7265_UCODE_API_MIN 17
+#define IWL7265D_UCODE_API_MIN 17
#define IWL3168_UCODE_API_MIN 20
/* NVM versions */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 6c6725e808d4..d02ca1491d16 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,11 +70,11 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 24
-#define IWL8265_UCODE_API_MAX 24
+#define IWL8000_UCODE_API_MAX 26
+#define IWL8265_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 16
+#define IWL8000_UCODE_API_MIN 17
#define IWL8265_UCODE_API_MIN 20
/* NVM versions */
@@ -212,6 +212,17 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
.vht_mu_mimo_supported = true,
};
+const struct iwl_cfg iwl8275_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 8275",
+ .fw_name_pre = IWL8265_FW_PRE,
+ IWL_DEVICE_8265,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .vht_mu_mimo_supported = true,
+};
+
const struct iwl_cfg iwl4165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 4165",
.fw_name_pre = IWL8000_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index 1fec6afbe041..ff850410d897 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -55,10 +55,10 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 24
+#define IWL9000_UCODE_API_MAX 26
/* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN 16
+#define IWL9000_UCODE_API_MIN 17
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
@@ -187,6 +187,17 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
.integrated = true,
};
+const struct iwl_cfg iwl9560_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+};
+
/*
* TODO the struct below is for internal testing only this should be
* removed by EO 2016~
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
index 4d78232c8afe..ea1618525878 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 24
+#define IWL_A000_UCODE_API_MAX 26
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 7008319532ef..2660cc4b9f8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -359,7 +359,6 @@ struct iwl_cfg {
high_temp:1,
mac_addr_from_csr:1,
lp_xtal_workaround:1,
- no_power_up_nic_in_init:1,
disable_dummy_notification:1,
apmg_not_supported:1,
mq_rx_supported:1,
@@ -445,6 +444,7 @@ extern const struct iwl_cfg iwl7265d_n_cfg;
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
+extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
@@ -454,6 +454,7 @@ extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg;
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 871ad02fdb17..d73e9d436027 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -589,6 +589,8 @@ enum dtd_diode_reg {
* Causes for the FH register interrupts
*/
enum msix_fh_int_causes {
+ MSIX_FH_INT_CAUSES_Q0 = BIT(0),
+ MSIX_FH_INT_CAUSES_Q1 = BIT(1),
MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16),
MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17),
MSIX_FH_INT_CAUSES_S2D = BIT(19),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
index 1d9dd153ef1c..50510fb6ab8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
@@ -33,9 +33,6 @@
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index dd75ea7c936e..33ef5372d195 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -643,6 +643,7 @@ struct iwl_rb_status {
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
+#define IWL_TFH_NUM_TBS 25
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
@@ -664,25 +665,29 @@ struct iwl_tfd_tb {
} __packed;
/**
- * struct iwl_tfd
+ * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
*
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- * 5 reserved
- * 6-7 padding (not used)
- * @ tbs[20] transmit frame buffer descriptors
- * @ __pad padding
+ * This structure holds the DMA address and length of a transmit buffer
*
+ * @tb_len length of the tx buffer
+ * @addr 64 bits dma address
+ */
+struct iwl_tfh_tb {
+ __le16 tb_len;
+ __le64 addr;
+} __packed;
+
+/**
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ * contiguous 256 TFDs.
+ * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For a000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
*
* Driver must indicate the physical address of the base of each
* circular buffer via the FH_MEM_CBBC_QUEUE registers.
*
- * Each TFD contains pointer/size information for up to 20 data buffers
+ * Each TFD contains pointer/size information for up to 20 / 25 data buffers
* in host DRAM. These buffers collectively contain the (one) frame described
* by the TFD. Each buffer must be a single contiguous block of memory within
* itself, but buffers may be scattered in host DRAM. Each buffer has max size
@@ -691,6 +696,16 @@ struct iwl_tfd_tb {
*
* A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
*/
+
+/**
+ * struct iwl_tfd - Transmit Frame Descriptor (TFD)
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ */
struct iwl_tfd {
u8 __reserved1[3];
u8 num_tbs;
@@ -698,6 +713,19 @@ struct iwl_tfd {
__le32 __pad;
} __packed;
+/**
+ * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
+ * @ num_tbs 0-4 number of active tbs
+ * 5 -15 reserved
+ * @ tbs[25] transmit frame buffer descriptors
+ * @ __pad padding
+ */
+struct iwl_tfh_tfd {
+ __le16 num_tbs;
+ struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
+ __le32 __pad;
+} __packed;
+
/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000 /* 4k */
@@ -706,8 +734,13 @@ struct iwl_tfd {
/**
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
+ * For devices up to a000:
+ * @tfd_offset 0-12 - tx command byte count
+ * 12-16 - station index
+ * For a000 and on:
* @tfd_offset 0-12 - tx command byte count
- * 12-16 - station index
+ * 12-13 - number of 64 byte chunks
+ * 14-16 - reserved
*/
struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 1b1e045f8907..ceec5ca2b1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -199,8 +199,6 @@ struct iwl_ucode_capa {
* @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
- * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
- * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
@@ -210,36 +208,24 @@ struct iwl_ucode_capa {
* from the probe request template.
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in different bindings.
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
- * P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
- * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
- IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
- IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
- IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
@@ -249,24 +235,21 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
- * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
- * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
- * instead of 3.
- * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
- * (command version 3) that supports per-chain limits
+ * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ * iteration complete notification, and the timestamp reported for RX
+ * received during scan, are reported in TSF of the mac specified in the
+ * scan request.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
- IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
- IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
- IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
- IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27,
+ IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 74ea68d1063c..5f229556339a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -329,4 +329,13 @@ iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
return conf_tlv->usniffer;
}
+static inline const struct fw_img *
+iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
+{
+ if (ucode_type >= IWL_UCODE_TYPE_MAX)
+ return NULL;
+
+ return &fw->img[ucode_type];
+}
+
#endif /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 92c8b5f9a9cb..a9f69fdd170b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -267,7 +267,7 @@ static const char *get_rfh_string(int cmd)
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
- };
+ }
switch (cmd) {
IWL_CMD(RFH_RXF_DMA_CFG);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
index 8aa1f2b7fdfc..88f260db3744 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
@@ -99,8 +99,12 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
continue;
for (i = 0; i < w->n_cmds; i++) {
- if (w->cmds[i] ==
- WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ u16 rec_id = WIDE_ID(pkt->hdr.group_id,
+ pkt->hdr.cmd);
+
+ if (w->cmds[i] == rec_id ||
+ (!iwl_cmd_groupid(w->cmds[i]) &&
+ DEF_ID(w->cmds[i]) == rec_id)) {
found = true;
break;
}
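Once wide headers are in use, a waiter registered with a bare 8-bit command id must still match a reply that carries the default group id; that is what the extra comparison above does (DEF_ID is introduced later in this patch as (1 << 8) | opcode). A sketch of the matching rule with an invented opcode:

#include <stdio.h>
#include <stdint.h>

#define WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))
#define DEF_ID(opcode)		((1 << 8) | (opcode))	/* default group is 1 */
#define CMD_GROUPID(id)		(((id) >> 8) & 0xff)

/* Exact match, or: the waiter used a legacy 8-bit id and the reply
 * arrived in the default group. */
static int notif_matches(uint16_t waited, uint16_t received)
{
	return waited == received ||
	       (!CMD_GROUPID(waited) && DEF_ID(waited) == received);
}

int main(void)
{
	uint16_t reply = WIDE_ID(0x1, 0x6c);	/* invented opcode 0x6c */

	printf("legacy waiter: %d\n", notif_matches(0x6c, reply));	/* 1 */
	printf("wide waiter:   %d\n", notif_matches(WIDE_ID(0x1, 0x6c), reply)); /* 1 */
	printf("other opcode:  %d\n", notif_matches(0x6d, reply));	/* 0 */
	return 0;
}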
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 43f8f7d45ddb..3bd6fc1b76d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -67,6 +67,7 @@
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
@@ -564,11 +565,16 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
- /* If OEM did not fuse address - get it from OTP */
- if (!mac_addr0 && !mac_addr1) {
- mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
- mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
- }
+ iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+ /*
+ * If the OEM fused a valid address, use it instead of the one in the
+ * OTP
+ */
+ if (is_valid_ether_addr(data->hw_addr))
+ return;
+
+ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
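The rework tries the OEM strap registers first and falls back to OTP only when the decoded address is not a valid unicast address. A sketch of that fallback order; the validity test below mirrors what is_valid_ether_addr() rejects (all-zero and multicast), and the OTP bytes are invented:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Roughly what is_valid_ether_addr() rejects: all-zero and multicast. */
static int valid_ether_addr(const uint8_t a[6])
{
	static const uint8_t zero[6];

	return memcmp(a, zero, 6) != 0 && !(a[0] & 0x01);
}

int main(void)
{
	uint8_t strap[6] = { 0 };	/* OEM fused nothing */
	uint8_t otp[6] = { 0x00, 0x24, 0xd7, 0x12, 0x34, 0x56 };	/* invented */
	uint8_t hw_addr[6];

	memcpy(hw_addr, strap, 6);
	if (!valid_ether_addr(hw_addr))	/* strap invalid: fall back to OTP */
		memcpy(hw_addr, otp, 6);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n", hw_addr[0], hw_addr[1],
	       hw_addr[2], hw_addr[3], hw_addr[4], hw_addr[5]);
	return 0;
}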
@@ -899,3 +905,91 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
return regd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
+
+#ifdef CONFIG_ACPI
+#define WRDD_METHOD "WRDD"
+#define WRDD_WIFI (0x07)
+#define WRDD_WIGIG (0x10)
+
+static u32 iwl_wrdd_get_mcc(struct device *dev, union acpi_object *wrdd)
+{
+ union acpi_object *mcc_pkg, *domain_type, *mcc_value;
+ u32 i;
+
+ if (wrdd->type != ACPI_TYPE_PACKAGE ||
+ wrdd->package.count < 2 ||
+ wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ wrdd->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_EEPROM(dev, "Unsupported wrdd structure\n");
+ return 0;
+ }
+
+ for (i = 1 ; i < wrdd->package.count ; ++i) {
+ mcc_pkg = &wrdd->package.elements[i];
+
+ if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
+ mcc_pkg->package.count < 2 ||
+ mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ mcc_pkg = NULL;
+ continue;
+ }
+
+ domain_type = &mcc_pkg->package.elements[0];
+ if (domain_type->integer.value == WRDD_WIFI)
+ break;
+
+ mcc_pkg = NULL;
+ }
+
+ if (mcc_pkg) {
+ mcc_value = &mcc_pkg->package.elements[1];
+ return mcc_value->integer.value;
+ }
+
+ return 0;
+}
+
+int iwl_get_bios_mcc(struct device *dev, char *mcc)
+{
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ u32 mcc_val;
+
+ root_handle = ACPI_HANDLE(dev);
+ if (!root_handle) {
+ IWL_DEBUG_EEPROM(dev,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)WRDD_METHOD,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_EEPROM(dev, "WRD method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call WRDD with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_EEPROM(dev, "WRDC invocation failed (0x%x)\n",
+ status);
+ return -ENOENT;
+ }
+
+ mcc_val = iwl_wrdd_get_mcc(dev, wrdd.pointer);
+ kfree(wrdd.pointer);
+ if (!mcc_val)
+ return -ENOENT;
+
+ mcc[0] = (mcc_val >> 8) & 0xff;
+ mcc[1] = mcc_val & 0xff;
+ mcc[2] = '\0';
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_get_bios_mcc);
+#endif
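WRDD hands back the country code as a single integer with one ASCII character per byte, so the tail of iwl_get_bios_mcc() just splits it into a string. A sketch of the decoding, using 0x5553 ("US") as an example value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mcc_val = 0x5553;	/* 'U' << 8 | 'S' -- example value */
	char mcc[3];

	mcc[0] = (mcc_val >> 8) & 0xff;	/* high byte: first character */
	mcc[1] = mcc_val & 0xff;	/* low byte: second character */
	mcc[2] = '\0';

	printf("BIOS MCC: %s\n", mcc);	/* US */
	return 0;
}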
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index d704d52aa7ec..7249e5b403f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -5,7 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -93,4 +94,21 @@ struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc);
+#ifdef CONFIG_ACPI
+/**
+ * iwl_get_bios_mcc - read MCC from BIOS, if available
+ *
+ * @dev: the struct device
+ * @mcc: output buffer (3 bytes) that will get the MCC
+ *
+ * This function tries to read the current MCC from ACPI if available.
+ */
+int iwl_get_bios_mcc(struct device *dev, char *mcc);
+#else
+static inline int iwl_get_bios_mcc(struct device *dev, char *mcc)
+{
+ return -ENOENT;
+}
+#endif
+
#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index 7beba9ae5617..2893826d7d2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -110,7 +110,7 @@ enum iwl_phy_db_section_type {
IWL_PHY_DB_MAX
};
-#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
+#define PHY_DB_CMD 0x6c
/*
* phy db - configure operational ucode
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 6069a9ff53fa..d42cab291025 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -65,6 +65,7 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
+#include "iwl-fh.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
@@ -77,7 +78,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
static struct lock_class_key __key;
#endif
- trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+ trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
if (!trans)
return NULL;
@@ -102,18 +103,14 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
SLAB_HWCACHE_ALIGN,
NULL);
if (!trans->dev_cmd_pool)
- goto free;
+ return NULL;
return trans;
- free:
- kfree(trans);
- return NULL;
}
void iwl_trans_free(struct iwl_trans *trans)
{
kmem_cache_destroy(trans->dev_cmd_pool);
- kfree(trans);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
@@ -139,6 +136,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+ if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
+ cmd->id = DEF_ID(cmd->id);
+
ret = trans->ops->send_cmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 5535e2238da3..0296124a7f9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -153,6 +153,7 @@ static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
/* make u16 wide id out of u8 group and opcode */
#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+#define DEF_ID(opcode) ((1 << 8) | (opcode))
/* due to the conversion, this group is special; new groups
* should be defined in the appropriate fw-api header files
@@ -262,8 +263,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* (i.e. mark it as non-idle).
* @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
* called after this command completes. Valid only with CMD_ASYNC.
- * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
- * check that we leave enough room for the TBs bitmap which needs 20 bits.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
@@ -274,8 +273,6 @@ enum CMD_MODE {
CMD_MAKE_TRANS_IDLE = BIT(5),
CMD_WAKE_UP_TRANS = BIT(6),
CMD_WANT_ASYNC_CALLBACK = BIT(7),
-
- CMD_TB_BITMAP_POS = 11,
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -488,7 +485,6 @@ struct iwl_hcmd_arr {
* @bc_table_dword: set to true if the BC table expects the byte count to be
* in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
- * @wide_cmd_header: firmware supports wide host command header
* @sw_csum_tx: transport should compute the TCP checksum
* @command_groups: array of command groups, each member is an array of the
* commands in the group; for debugging only
@@ -510,7 +506,6 @@ struct iwl_trans_config {
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
bool scd_set_active;
- bool wide_cmd_header;
bool sw_csum_tx;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
@@ -649,6 +644,8 @@ struct iwl_trans_ops {
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
+ dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
+
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
@@ -772,6 +769,7 @@ enum iwl_plat_pm_mode {
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
+ * @wide_cmd_header: true when ucode supports wide command header format
* @num_rx_queues: number of RX queues allocated by the transport;
* the transport must set this before calling iwl_drv_start()
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
@@ -823,6 +821,7 @@ struct iwl_trans {
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
+ bool wide_cmd_header;
u8 num_rx_queues;
@@ -1073,6 +1072,15 @@ static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
+static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
+ int queue)
+{
+ /* we should never be called if the trans doesn't support it */
+ BUG_ON(!trans->ops->get_txq_byte_table);
+
+ return trans->ops->get_txq_byte_table(trans, queue);
+}
+
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index b23271755daf..2d6f44fbaf62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -504,6 +504,28 @@ static inline char *iwl_dbgfs_is_match(char *name, char *buf)
return !strncmp(name, buf, len) ? buf + len : NULL;
}
+static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 curr_gp2;
+ u64 curr_os;
+ s64 diff;
+ char buf[64];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
+
+ iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+ do_div(curr_os, NSEC_PER_USEC);
+ diff = curr_os - curr_gp2;
+ pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
char *buf,
size_t count, loff_t *ppos)
@@ -1530,6 +1552,8 @@ MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
+MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -1554,8 +1578,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
- (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
+ (vif->type == NL80211_IFTYPE_STATION && vif->p2p)))
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
@@ -1570,6 +1593,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff,
+ mvmvif->dbgfs_dir, S_IRUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index b34489817c70..539d718df797 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -917,6 +917,59 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
return ret ?: count;
}
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_rx_cmd_buffer rxb = {
+ ._rx_page_order = 0,
+ .truesize = 0, /* not used */
+ ._offset = 0,
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_rx_mpdu_desc *desc;
+ int bin_len = count / 2;
+ int ret = -EINVAL;
+
+ /* only the 9000-series RX descriptor format is supported */
+ if (!mvm->trans->cfg->mq_rx_supported)
+ return -ENOTSUPP;
+
+ rxb._page = alloc_pages(GFP_ATOMIC, 0);
+ if (!rxb._page)
+ return -ENOMEM;
+ pkt = rxb_addr(&rxb);
+
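+ /* the input is an ASCII hex dump; hex2bin() turns every 2 chars into 1 byte */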
+ ret = hex2bin(page_address(rxb._page), buf, bin_len);
+ if (ret)
+ goto out;
+
+ /* avoid invalid memory access */
+ if (bin_len < sizeof(*pkt) + sizeof(*desc))
+ goto out;
+
+ /* check this is RX packet */
+ if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
+ WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
+ goto out;
+
+ /* check the length in metadata matches actual received length */
+ desc = (void *)pkt->data;
+ if (le16_to_cpu(desc->mpdu_len) !=
+ (bin_len - sizeof(*desc) - sizeof(*pkt)))
+ goto out;
+
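+ /* hand the crafted packet to the regular MQ RX path, as a softirq would */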
+ local_bh_disable();
+ iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+ local_bh_enable();
+ ret = 0;
+
+out:
+ iwl_free_rxb(&rxb);
+
+ return ret ?: count;
+}
+
static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1454,6 +1507,7 @@ MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1464,6 +1518,132 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
#endif
+static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd cmd = {};
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &cmd, },
+ .len = { sizeof(cmd) },
+ };
+ size_t delta, len;
+ ssize_t ret;
+
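+ /* offsets with bits above the low 24 set select the UMAC, else the LMAC */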
+ hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+ DEBUG_GROUP, 0);
+ cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
+
+ /* Take care of alignment of both the position and the length */
+ delta = *ppos & 0x3;
+ cmd.addr = cpu_to_le32(*ppos - delta);
+ cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4,
+ (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
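+ /* e.g. *ppos = 0x1006, count = 8: delta = 2, addr = 0x1004, len = 3 dwords */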
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ len = min((size_t)le32_to_cpu(rsp->len) << 2,
+ iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
+ len = min(len - delta, count);
+ if (len < 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_mem_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd *cmd;
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {};
+ size_t cmd_size;
+ size_t data_size;
+ u32 op, len;
+ ssize_t ret;
+
+ hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+ DEBUG_GROUP, 0);
+
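+ /* unaligned or sub-dword writes go as bytes; aligned ones as whole dwords */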
+ if (*ppos & 0x3 || count < 4) {
+ op = DEBUG_MEM_OP_WRITE_BYTES;
+ len = min(count, (size_t)(4 - (*ppos & 0x3)));
+ data_size = len;
+ } else {
+ op = DEBUG_MEM_OP_WRITE;
+ len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
+ data_size = len << 2;
+ }
+
+ cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
+ cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->op = cpu_to_le32(op);
+ cmd->len = cpu_to_le32(len);
+ cmd->addr = cpu_to_le32(*ppos);
+ if (copy_from_user((void *)cmd->data, user_buf, data_size)) {
+ kfree(cmd);
+ return -EFAULT;
+ }
+
+ hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+ hcmd.data[0] = (void *)cmd;
+ hcmd.len[0] = cmd_size;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ kfree(cmd);
+
+ if (ret < 0)
+ return ret;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = data_size;
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
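+/*
+ * Usage sketch (the debugfs path is hypothetical, it depends on the layout):
+ * the file offset maps to a device address, so e.g.
+ *	dd if=.../iwlmvm/mem bs=4 count=4 skip=$((0x800000 / 4))
+ * reads 4 dwords of LMAC memory at 0x800000; offsets with bits above the
+ * low 24 set address the UMAC instead.
+ */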
+static const struct file_operations iwl_dbgfs_mem_ops = {
+ .read = iwl_dbgfs_mem_read,
+ .write = iwl_dbgfs_mem_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
struct dentry *bcast_dir __maybe_unused;
@@ -1502,6 +1682,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, S_IWUSR);
if (!debugfs_create_bool("enable_scan_iteration_notif",
S_IRUSR | S_IWUSR,
mvm->debugfs_dir,
@@ -1560,6 +1741,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
goto err;
+ debugfs_create_file("mem", S_IRUSR | S_IWUSR, dbgfs_dir, mvm,
+ &iwl_dbgfs_mem_ops);
+
/*
* Create a symlink with mac80211. It will be removed when mac80211
 * exits (before the opmode exits, which removes the target).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
index 404b0de9e2dc..3fa43d1348a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
@@ -313,35 +313,26 @@ enum iwl_dev_tx_power_cmd_mode {
IWL_TX_POWER_MODE_SET_ACK = 3,
}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */
+#define IWL_NUM_CHAIN_LIMITS 2
+#define IWL_NUM_SUB_BANDS 5
+
/**
- * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBms.
* @dev_24: device TX power restriction in 1/8 dBms
* @dev_52_low: device TX power restriction upper band - low
* @dev_52_high: device TX power restriction upper band - high
+ * @per_chain_restriction: per chain restrictions
*/
-struct iwl_dev_tx_power_cmd_v2 {
+struct iwl_dev_tx_power_cmd_v3 {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
__le16 dev_24;
__le16 dev_52_low;
__le16 dev_52_high;
-} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
-
-#define IWL_NUM_CHAIN_LIMITS 2
-#define IWL_NUM_SUB_BANDS 5
-
-/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * @v2: version 2 of the command, embedded here for easier software handling
- * @per_chain_restriction: per chain restrictions
- */
-struct iwl_dev_tx_power_cmd_v3 {
- /* v3 is just an extension of v2 - keep this here */
- struct iwl_dev_tx_power_cmd_v2 v2;
__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
index f01dab0d0dac..0c294c9f98e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -603,6 +604,8 @@ struct iwl_scan_req_umac_tail {
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
+ * @reserved2: for future use and alignment
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
@@ -620,8 +623,10 @@ struct iwl_scan_req_umac {
__le32 flags;
__le32 uid;
__le32 ooc_priority;
- /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
- __le32 general_flags;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
+ __le16 general_flags;
+ u8 reserved2;
+ u8 scan_start_mac_id;
u8 extended_dwell;
u8 active_dwell;
u8 passive_dwell;
@@ -629,7 +634,7 @@ struct iwl_scan_req_umac {
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
u8 channel_flags;
u8 n_channels;
__le16 reserved;
@@ -718,8 +723,8 @@ struct iwl_scan_offload_profiles_query {
* @status: one of SCAN_COMP_STATUS_*
* @bt_status: BT on/off status
* @last_channel: last channel that was scanned
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
+ * @start_tsf: TSF timer in usecs of the scan start time for the mac specified
+ * in &struct iwl_scan_req_umac.
* @results: array of scan results, only "scanned_channels" of them are valid
*/
struct iwl_umac_scan_iter_complete_notif {
@@ -728,9 +733,8 @@ struct iwl_umac_scan_iter_complete_notif {
u8 status;
u8 bt_status;
u8 last_channel;
- __le32 tsf_low;
- __le32 tsf_high;
+ __le64 start_tsf;
struct iwl_scan_results_notif results[];
-} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 6b4c63a5e625..59ca97a11b2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -89,7 +89,6 @@
* @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
* Should be set for 26/30 length MAC headers
* @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
- * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
* @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
* @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
* @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
@@ -116,7 +115,6 @@ enum iwl_tx_flags {
TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18),
TX_CMD_FLG_MH_PAD = BIT(20),
TX_CMD_FLG_RESP_TO_DRV = BIT(21),
- TX_CMD_FLG_CCMP_AGG = BIT(22),
TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
TX_CMD_FLG_DUR = BIT(25),
TX_CMD_FLG_FW_DROP = BIT(26),
@@ -149,7 +147,7 @@ enum iwl_tx_pm_timeouts {
* @TX_CMD_SEC_EXT: extended cipher algorithm.
* @TX_CMD_SEC_GCMP: GCMP encryption algorithm.
* @TX_CMD_SEC_KEY128: set for 104 bits WEP key.
- * @TC_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
+ * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
* from the table instead of from the TX command.
* If the key is taken from the key table its index should be given by the
* first byte of the TX command key field.
@@ -161,7 +159,7 @@ enum iwl_tx_cmd_sec_ctrl {
TX_CMD_SEC_EXT = 0x04,
TX_CMD_SEC_GCMP = 0x05,
TX_CMD_SEC_KEY128 = 0x08,
- TC_CMD_SEC_KEY_FROM_TABLE = 0x08,
+ TX_CMD_SEC_KEY_FROM_TABLE = 0x08,
};
/* TODO: how are these values OK with only a 16 bit variable??? */
@@ -578,6 +576,85 @@ struct iwl_mvm_ba_notif {
} __packed;
/**
+ * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ */
+struct iwl_mvm_compressed_ba_tfd {
+ u8 q_num;
+ u8 reserved;
+ __le16 tfd_index;
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwl_mvm_compressed_ba_ratid {
+ u8 q_num;
+ u8 tid;
+ __le16 ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/*
+ * enum iwl_mvm_ba_resp_flags - TX aggregation status
+ * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ * expected time
+ */
+enum iwl_mvm_ba_resp_flags {
+ IWL_MVM_BA_RESP_TX_AGG,
+ IWL_MVM_BA_RESP_TX_BAR,
+ IWL_MVM_BA_RESP_TX_AGG_FAIL,
+ IWL_MVM_BA_RESP_TX_UNDERRUN,
+ IWL_MVM_BA_RESP_TX_BT_KILL,
+ IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwl_mvm_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ * not a copy from the LQ command. Thus, if a rate other than the first
+ * was used for Tx-ing, this value will be set to 0 by the FW.
+ * @initial_rate: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ */
+struct iwl_mvm_compressed_ba_notif {
+ __le32 flags;
+ u8 sta_id;
+ u8 reduced_txp;
+ u8 initial_rate;
+ u8 retry_cnt;
+ __le32 query_byte_cnt;
+ __le16 query_frame_cnt;
+ __le16 txed;
+ __le16 done;
+ __le32 wireless_time;
+ __le32 tx_rate;
+ __le16 tfd_cnt;
+ __le16 ra_tid_cnt;
+ struct iwl_mvm_compressed_ba_tfd tfd[1];
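+ /* clamp the requested dwell to the scan type's max out-of-channel time */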
+ struct iwl_mvm_compressed_ba_ratid ra_tid[0];
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
+
+/**
* struct iwl_mac_beacon_cmd_v6 - beacon template command
* @tx: the tx commands associated with the beacon frame
 * @template_id: currently equal to the mac context id of the corresponding
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 57b574b2a092..97633690f3d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -205,7 +205,7 @@ enum {
/* Phy */
PHY_CONFIGURATION_CMD = 0x6a,
CALIB_RES_NOTIF_PHY_DB = 0x6b,
- /* PHY_DB_CMD = 0x6c, */
+ PHY_DB_CMD = 0x6c,
/* ToF - 802.11mc FTM */
TOF_CMD = 0x10,
@@ -340,6 +340,11 @@ enum iwl_prot_offload_subcmd_ids {
STORED_BEACON_NTF = 0xFF,
};
+enum iwl_fmac_debug_cmds {
+ LMAC_RD_WR = 0x0,
+ UMAC_RD_WR = 0x1,
+};
+
/* command groups */
enum {
LEGACY_GROUP = 0x0,
@@ -349,6 +354,7 @@ enum {
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
PROT_OFFLOAD_GROUP = 0xb,
+ DEBUG_GROUP = 0xf,
};
/**
@@ -1977,8 +1983,9 @@ struct iwl_tdls_config_res {
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
-#define TX_FIFO_MAX_NUM 8
-#define RX_FIFO_MAX_NUM 2
+#define TX_FIFO_MAX_NUM_9000 8
+#define TX_FIFO_MAX_NUM 15
+#define RX_FIFO_MAX_NUM 2
#define TX_FIFO_INTERNAL_MAX_NUM 6
/**
@@ -2004,6 +2011,21 @@ struct iwl_tdls_config_res {
 * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
* set, the last 3 members don't exist.
*/
+struct iwl_shared_mem_cfg_v1 {
+ __le32 shared_mem_addr;
+ __le32 shared_mem_size;
+ __le32 sample_buff_addr;
+ __le32 sample_buff_size;
+ __le32 txfifo_addr;
+ __le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+ __le32 rxfifo_size[RX_FIFO_MAX_NUM];
+ __le32 page_buff_addr;
+ __le32 page_buff_size;
+ __le32 rxfifo_addr;
+ __le32 internal_txfifo_addr;
+ __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
@@ -2017,7 +2039,7 @@ struct iwl_shared_mem_cfg {
__le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
* VHT MU-MIMO group configuration
@@ -2133,4 +2155,48 @@ struct iwl_channel_switch_noa_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+/* Operation types for the debug mem access */
+enum {
+ DEBUG_MEM_OP_READ = 0,
+ DEBUG_MEM_OP_WRITE = 1,
+ DEBUG_MEM_OP_WRITE_BYTES = 2,
+};
+
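+/* driver-side cap on how many dwords a single debug mem command transfers */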
+#define DEBUG_MEM_MAX_SIZE_DWORDS 32
+
+/**
+ * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory
+ * @op: DEBUG_MEM_OP_*
+ * @addr: address to read/write from/to
+ * @len: in dwords, to read/write
+ * @data: for write operations, contains the source buffer
+ */
+struct iwl_dbg_mem_access_cmd {
+ __le32 op;
+ __le32 addr;
+ __le32 len;
+ __le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
+
+/* Status responses for the debug mem access */
+enum {
+ DEBUG_MEM_STATUS_SUCCESS = 0x0,
+ DEBUG_MEM_STATUS_FAILED = 0x1,
+ DEBUG_MEM_STATUS_LOCKED = 0x2,
+ DEBUG_MEM_STATUS_HIDDEN = 0x3,
+ DEBUG_MEM_STATUS_LENGTH = 0x4,
+};
+
+/**
+ * struct iwl_dbg_mem_access_rsp - Response to debug mem commands
+ * @status: DEBUG_MEM_STATUS_*
+ * @len: read dwords (0 for write operations)
+ * @data: contains the read DWs
+ */
+struct iwl_dbg_mem_access_rsp {
+ __le32 status;
+ __le32 len;
+ __le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 46b52bf705fb..d89d0a1fd34e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -440,14 +440,12 @@ static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
{ .start = 0x00a04560, .end = 0x00a0457c },
{ .start = 0x00a04590, .end = 0x00a04598 },
{ .start = 0x00a045c0, .end = 0x00a045f4 },
- { .start = 0x00a44000, .end = 0x00a7bf80 },
};
static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a05c00, .end = 0x00a05c18 },
{ .start = 0x00a05400, .end = 0x00a056e8 },
{ .start = 0x00a08000, .end = 0x00a098bc },
- { .start = 0x00adfc00, .end = 0x00adfd1c },
{ .start = 0x00a02400, .end = 0x00a02758 },
};
@@ -559,7 +557,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(struct iwl_fw_error_dump_fifo);
}
- for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
+ for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
if (!mem_cfg->txfifo_size[i])
continue;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 47e8e70dcfac..872066317fa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -90,15 +90,6 @@ struct iwl_mvm_alive_data {
u32 scd_base_addr;
};
-static inline const struct fw_img *
-iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
-{
- if (ucode_type >= IWL_UCODE_TYPE_MAX)
- return NULL;
-
- return &mvm->fw->img[ucode_type];
-}
-
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
@@ -592,9 +583,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
- fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
+ fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
else
- fw = iwl_get_ucode_image(mvm, ucode_type);
+ fw = iwl_get_ucode_image(mvm->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
@@ -838,59 +829,48 @@ out:
return ret;
}
-static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
+static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
{
- struct iwl_host_cmd cmd = {
- .flags = CMD_WANT_SKB,
- .data = { NULL, },
- .len = { 0, },
- };
- struct iwl_shared_mem_cfg *mem_cfg;
- struct iwl_rx_packet *pkt;
- u32 i;
+ struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
+ int i;
- lockdep_assert_held(&mvm->mutex);
+ mvm->shared_mem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
+ mvm->shared_mem_cfg.txfifo_size[i] =
+ le32_to_cpu(mem_cfg->txfifo_size[i]);
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
+ mvm->shared_mem_cfg.rxfifo_size[i] =
+ le32_to_cpu(mem_cfg->rxfifo_size[i]);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
- cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
- else
- cmd.id = SHARED_MEM_CFG;
+ BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+ sizeof(mem_cfg->internal_txfifo_size));
- if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
- return;
+ for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i++)
+ mvm->shared_mem_cfg.internal_txfifo_size[i] =
+ le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+}
- pkt = cmd.resp_pkt;
- mem_cfg = (void *)pkt->data;
-
- mvm->shared_mem_cfg.shared_mem_addr =
- le32_to_cpu(mem_cfg->shared_mem_addr);
- mvm->shared_mem_cfg.shared_mem_size =
- le32_to_cpu(mem_cfg->shared_mem_size);
- mvm->shared_mem_cfg.sample_buff_addr =
- le32_to_cpu(mem_cfg->sample_buff_addr);
- mvm->shared_mem_cfg.sample_buff_size =
- le32_to_cpu(mem_cfg->sample_buff_size);
- mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
- for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
+static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
+ int i;
+
+ mvm->shared_mem_cfg.num_txfifo_entries =
+ ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
- mvm->shared_mem_cfg.page_buff_addr =
- le32_to_cpu(mem_cfg->page_buff_addr);
- mvm->shared_mem_cfg.page_buff_size =
- le32_to_cpu(mem_cfg->page_buff_size);
- /* new API has more data */
+ /* new API has more data, from the rxfifo_addr field onwards */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
- mvm->shared_mem_cfg.rxfifo_addr =
- le32_to_cpu(mem_cfg->rxfifo_addr);
- mvm->shared_mem_cfg.internal_txfifo_addr =
- le32_to_cpu(mem_cfg->internal_txfifo_addr);
-
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
@@ -900,6 +880,33 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
+}
+
+static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
+{
+ struct iwl_host_cmd cmd = {
+ .flags = CMD_WANT_SKB,
+ .data = { NULL, },
+ .len = { 0, },
+ };
+ struct iwl_rx_packet *pkt;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+ cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ else
+ cmd.id = SHARED_MEM_CFG;
+
+ if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
+ return;
+
+ pkt = cmd.resp_pkt;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_parse_shared_mem_a000(mvm, pkt);
+ else
+ iwl_mvm_parse_shared_mem(mvm, pkt);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
@@ -1039,19 +1046,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
struct iwl_mvm_sar_table sar_table;
struct iwl_dev_tx_power_cmd cmd = {
- .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
int ret, i, j, idx;
int len = sizeof(cmd);
- /* we can't do anything with the table if the FW doesn't support it */
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_TX_POWER_CHAIN)) {
- IWL_DEBUG_RADIO(mvm,
- "FW doesn't support per-chain TX power settings.\n");
- return 0;
- }
-
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
@@ -1108,27 +1107,27 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* (for example, if we were in RFKILL)
*/
ret = iwl_run_init_mvm_ucode(mvm, false);
- if (ret && !iwlmvm_mod_params.init_dbg) {
+
+ if (iwlmvm_mod_params.init_dbg)
+ return 0;
+
+ if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
/* this can't happen */
if (WARN_ON(ret > 0))
ret = -ERFKILL;
goto error;
}
- if (!iwlmvm_mod_params.init_dbg) {
- /*
- * Stop and start the transport without entering low power
- * mode. This will save the state of other components on the
- * device that are triggered by the INIT firwmare (MFUART).
- */
- _iwl_trans_stop_device(mvm->trans, false);
- ret = _iwl_trans_start_hw(mvm->trans, false);
- if (ret)
- goto error;
- }
- if (iwlmvm_mod_params.init_dbg)
- return 0;
+ /*
+ * Stop and start the transport without entering low power
+ * mode. This will save the state of other components on the
+ * device that are triggered by the INIT firmware (MFUART).
+ */
+ _iwl_trans_stop_device(mvm->trans, false);
+ ret = _iwl_trans_start_hw(mvm->trans, false);
+ if (ret)
+ goto error;
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret) {
@@ -1226,9 +1225,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* TODO: read the budget from BIOS / Platform NVM */
- if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
+ if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
mvm->cooling_dev.cur_state);
+ if (ret)
+ goto error;
+ }
#else
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 69c42ce45b8a..6b962d6b067a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -539,6 +539,11 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MAX_TID_COUNT, 0);
+ else
+ iwl_mvm_disable_txq(mvm,
+ IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+ vif->hw_queue[0], IWL_MAX_TID_COUNT,
+ 0);
break;
case NL80211_IFTYPE_AP:
@@ -769,26 +774,6 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
cmd->ac[txf].fifos_mask = BIT(txf);
}
- if (vif->type == NL80211_IFTYPE_AP) {
- /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
- cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |=
- BIT(IWL_MVM_TX_FIFO_MCAST);
-
- /*
- * in AP mode, pass probe requests and beacons from other APs
- * (needed for ht protection); when there're no any associated
- * station don't ask FW to pass beacons to prevent unnecessary
- * wake-ups.
- */
- cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
- if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
- cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
- IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
- } else {
- IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
- }
- }
-
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
@@ -1186,6 +1171,7 @@ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
*/
static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
struct iwl_mac_data_ap *ctxt_ap,
bool add)
{
@@ -1196,6 +1182,23 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
.beacon_device_ts = 0
};
+ /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
+ /*
+ * in AP mode, pass probe requests and beacons from other APs
+ * (needed for ht protection); when there are no associated stations,
+ * don't ask FW to pass beacons to prevent unnecessary
+ * wake-ups.
+ */
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+ IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+ } else {
+ IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+ }
+
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_ap->bi_reciprocal =
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
@@ -1253,7 +1256,7 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
/* Fill the data specific for ap mode */
- iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap,
action == FW_CTXT_ACTION_ADD);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
@@ -1272,7 +1275,7 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
/* Fill the data specific for GO mode */
- iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap,
action == FW_CTXT_ACTION_ADD);
cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 9506e658abd3..318efd814037 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -479,13 +479,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->n_cipher_suites++;
}
- /*
- * Enable 11w if advertised by firmware and software crypto
- * is not enabled (as the firmware will interpret some mgmt
- * packets, so enabling it with software crypto isn't safe)
+ /* Enable 11w if software crypto is not enabled (as the
+ * firmware will interpret some mgmt packets, so enabling it
+ * with software crypto isn't safe).
*/
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
- !iwlwifi_mod_params.sw_crypto) {
+ if (!iwlwifi_mod_params.sw_crypto) {
ieee80211_hw_set(hw, MFP_CAPABLE);
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_AES_CMAC;
@@ -547,9 +545,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
- hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
-
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
@@ -653,6 +649,16 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SCAN_START_TIME);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@@ -720,6 +726,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
iwl_mvm_leds_exit(mvm);
+ if (mvm->cfg->vht_mu_mimo_supported)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+
return ret;
}
@@ -1259,20 +1269,18 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
struct iwl_dev_tx_power_cmd cmd = {
- .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .v3.v2.mac_context_id =
+ .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .v3.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
+ .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
};
int len = sizeof(cmd);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
- len = sizeof(cmd.v3.v2);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@@ -2229,6 +2237,10 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
case NL80211_IFTYPE_ADHOC:
iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
break;
+ case NL80211_IFTYPE_MONITOR:
+ if (changes & BSS_CHANGED_MU_GROUPS)
+ iwl_mvm_update_mu_groups(mvm, vif);
+ break;
default:
/* shouldn't happen */
WARN_ON_ONCE(1);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2e30990c717e..d17cbf603f7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -604,16 +604,9 @@ enum iwl_mvm_tdls_cs_state {
};
struct iwl_mvm_shared_mem_cfg {
- u32 shared_mem_addr;
- u32 shared_mem_size;
- u32 sample_buff_addr;
- u32 sample_buff_size;
- u32 txfifo_addr;
+ int num_txfifo_entries;
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo_size[RX_FIFO_MAX_NUM];
- u32 page_buff_addr;
- u32 page_buff_size;
- u32 rxfifo_addr;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
@@ -828,6 +821,12 @@ struct iwl_mvm {
/* UMAC scan tracking */
u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
+ /* start time of last scan in TSF of the mac that requested the scan */
+ u64 scan_start;
+
+ /* the vif that requested the current scan */
+ struct iwl_mvm_vif *scan_vif;
+
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -1269,6 +1268,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -1305,8 +1305,6 @@ static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
}
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 7a686f67f007..eade099b6dbf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -66,7 +66,6 @@
*****************************************************************************/
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
-#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
@@ -751,96 +750,6 @@ exit:
return resp_cp;
}
-#ifdef CONFIG_ACPI
-#define WRD_METHOD "WRDD"
-#define WRDD_WIFI (0x07)
-#define WRDD_WIGIG (0x10)
-
-static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
-{
- union acpi_object *mcc_pkg, *domain_type, *mcc_value;
- u32 i;
-
- if (wrdd->type != ACPI_TYPE_PACKAGE ||
- wrdd->package.count < 2 ||
- wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
- wrdd->package.elements[0].integer.value != 0) {
- IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
- return 0;
- }
-
- for (i = 1 ; i < wrdd->package.count ; ++i) {
- mcc_pkg = &wrdd->package.elements[i];
-
- if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
- mcc_pkg->package.count < 2 ||
- mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
- mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
- mcc_pkg = NULL;
- continue;
- }
-
- domain_type = &mcc_pkg->package.elements[0];
- if (domain_type->integer.value == WRDD_WIFI)
- break;
-
- mcc_pkg = NULL;
- }
-
- if (mcc_pkg) {
- mcc_value = &mcc_pkg->package.elements[1];
- return mcc_value->integer.value;
- }
-
- return 0;
-}
-
-static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
-{
- acpi_handle root_handle;
- acpi_handle handle;
- struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
- u32 mcc_val;
-
- root_handle = ACPI_HANDLE(mvm->dev);
- if (!root_handle) {
- IWL_DEBUG_LAR(mvm,
- "Could not retrieve root port ACPI handle\n");
- return -ENOENT;
- }
-
- /* Get the method's handle */
- status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_LAR(mvm, "WRD method not found\n");
- return -ENOENT;
- }
-
- /* Call WRDD with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status);
- return -ENOENT;
- }
-
- mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
- kfree(wrdd.pointer);
- if (!mcc_val)
- return -ENOENT;
-
- mcc[0] = (mcc_val >> 8) & 0xff;
- mcc[1] = mcc_val & 0xff;
- mcc[2] = '\0';
- return 0;
-}
-#else /* CONFIG_ACPI */
-static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
-{
- return -ENOENT;
-}
-#endif
-
int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
bool tlv_lar;
@@ -884,7 +793,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_mvm_get_bios_mcc(mvm, mcc)) {
+ !iwl_get_bios_mcc(mvm->dev, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 55d9096da68c..05fe6dd1a2c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -359,6 +359,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(BT_COEX_CI),
HCMD_NAME(PHY_CONFIGURATION_CMD),
HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
+ HCMD_NAME(PHY_DB_CMD),
HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
HCMD_NAME(SCAN_OFFLOAD_CONFIG_CMD),
@@ -652,11 +653,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
/* the hardware splits the A-MSDU */
if (mvm->cfg->mq_rx_supported)
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_WIDE_CMD_HDR);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
- trans_cfg.bc_table_dword = true;
+ trans->wide_cmd_header = true;
+ trans_cfg.bc_table_dword = true;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -711,37 +710,21 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"working without external nvm file\n");
- if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
- "not allowing power-up and not having nvm_file\n"))
+ err = iwl_trans_start_hw(mvm->trans);
+ if (err)
goto out_free;
- /*
- * Even if nvm exists in the nvm_file driver should read again the nvm
- * from the nic because there might be entries that exist in the OTP
- * and not in the file.
- * for nics with no_power_up_nic_in_init: rely completley on nvm_file
- */
- if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
- err = iwl_nvm_init(mvm, false);
- if (err)
- goto out_free;
- } else {
- err = iwl_trans_start_hw(mvm->trans);
- if (err)
- goto out_free;
-
- mutex_lock(&mvm->mutex);
- iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
- err = iwl_run_init_mvm_ucode(mvm, true);
- if (!err || !iwlmvm_mod_params.init_dbg)
- iwl_mvm_stop_device(mvm);
- iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
- mutex_unlock(&mvm->mutex);
- /* returns 0 if successful, 1 if success but in rfkill */
- if (err < 0 && !iwlmvm_mod_params.init_dbg) {
- IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
- goto out_free;
- }
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
+ err = iwl_run_init_mvm_ucode(mvm, true);
+ if (!err || !iwlmvm_mod_params.init_dbg)
+ iwl_mvm_stop_device(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
+ mutex_unlock(&mvm->mutex);
+ /* returns 0 if successful, 1 if success but in rfkill */
+ if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+ goto out_free;
}
scan_size = iwl_mvm_scan_size(mvm);
@@ -783,8 +766,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
flush_delayed_work(&mvm->fw_dump_wk);
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
- if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
- iwl_trans_op_mode_leave(trans);
+ iwl_trans_op_mode_leave(trans);
+
ieee80211_free_hw(mvm->hw);
return NULL;
}
@@ -857,9 +840,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
struct iwl_mvm *mvm =
container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
- struct list_head local_list;
-
- INIT_LIST_HEAD(&local_list);
+ LIST_HEAD(local_list);
/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
@@ -966,10 +947,11 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
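+ /* fold group and opcode together so both compare as one 16-bit wide id */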
- if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
- else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+ else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
@@ -981,13 +963,14 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
- else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
- pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
- else if (pkt->hdr.cmd == FRAME_RELEASE)
+ else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
@@ -1666,13 +1649,14 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
+ if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
- else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
- pkt->hdr.group_id == DATA_PATH_GROUP))
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
iwl_mvm_rx_queue_notif(mvm, rxb, queue);
- else
+ else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index ff85865b1dda..af6d10c23e5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -694,8 +694,7 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
/* enable PM on p2p if p2p stand alone */
if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
- p2p_mvmvif->pm_enabled = true;
+ p2p_mvmvif->pm_enabled = true;
return;
}
@@ -707,12 +706,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
ap_mvmvif->phy_ctxt->id);
/* clients are not stand alone: enable PM if DCM */
- if (!(client_same_channel || ap_same_channel) &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+ if (!(client_same_channel || ap_same_channel)) {
if (vifs->bss_active)
bss_mvmvif->pm_enabled = true;
- if (vifs->p2p_active &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
+ if (vifs->p2p_active)
p2p_mvmvif->pm_enabled = true;
return;
}
@@ -721,12 +718,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
* There is only one channel in the system and there are only
* bss and p2p clients that share it
*/
- if (client_same_channel && !vifs->ap_active &&
- (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
+ if (client_same_channel && !vifs->ap_active) {
 /* share same channel */
bss_mvmvif->pm_enabled = true;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
- p2p_mvmvif->pm_enabled = true;
+ p2p_mvmvif->pm_enabled = true;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 08d8a8abb918..a57c6ef5bc14 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -418,10 +418,11 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
ssn = ieee80211_sn_inc(ssn);
- /* holes are valid since nssn indicates frames were received. */
- if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
- continue;
- /* Empty the list. Will have more than one frame for A-MSDU */
+ /*
+ * Empty the list. Will have more than one frame for A-MSDU.
+ * Empty list is valid as well since nssn indicates frames were
+ * received.
+ */
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
@@ -434,7 +435,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
- while (!skb_peek_tail(&reorder_buf->entries[index]))
+ while (skb_queue_empty(&reorder_buf->entries[index]))
index = (index + 1) % reorder_buf->buf_size;
/* modify timer to match next frame's expiration time */
mod_timer(&reorder_buf->reorder_timer,
@@ -452,17 +453,17 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
u16 sn = 0, index = 0;
bool expired = false;
- spin_lock_bh(&buf->lock);
+ spin_lock(&buf->lock);
if (!buf->num_stored || buf->removed) {
- spin_unlock_bh(&buf->lock);
+ spin_unlock(&buf->lock);
return;
}
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
- if (!skb_peek_tail(&buf->entries[index]))
+ if (skb_queue_empty(&buf->entries[index]))
continue;
if (!time_after(jiffies, buf->reorder_time[index] +
RX_REORDER_BUF_TIMEOUT_MQ))
@@ -492,7 +493,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
buf->reorder_time[index] +
1 + RX_REORDER_BUF_TIMEOUT_MQ);
}
- spin_unlock_bh(&buf->lock);
+ spin_unlock(&buf->lock);
}
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
@@ -503,7 +504,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
struct iwl_mvm_reorder_buffer *reorder_buf;
u8 baid = data->baid;
- if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
+ if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
return;
rcu_read_lock();
@@ -590,6 +591,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
+ /*
+ * This also covers the case of receiving a Block Ack Request
+ * outside a BA session; we'll pass it to mac80211 and that
+ * then sends a delBA action frame.
+ */
if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
return false;
@@ -599,9 +605,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- /* not a data packet */
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ /* not a data packet or a bar */
+ if (!ieee80211_is_back_req(hdr->frame_control) &&
+ (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)))
return false;
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
@@ -625,6 +632,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
spin_lock_bh(&buffer->lock);
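+ /* a BAR only moves the window: release up to nssn and drop the frame */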
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ goto drop;
+ }
+
/*
* If there was a significant jump in the nssn - adjust.
* If the SN is smaller than the NSSN it might need to first go into
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index dac120f8861b..f279fdd6eb44 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -141,6 +141,7 @@ struct iwl_mvm_scan_params {
struct cfg80211_match_set *match_sets;
int n_scan_plans;
struct cfg80211_sched_scan_plan *scan_plans;
+ u32 measurement_dwell;
};
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -232,6 +233,27 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
return IWL_SCAN_TYPE_WILD;
}
+static int
+iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
+ struct cfg80211_scan_request *req,
+ struct iwl_mvm_scan_params *params)
+{
+ if (!req->duration)
+ return 0;
+
+ if (req->duration_mandatory &&
+ req->duration > scan_timing[params->type].max_out_time) {
+ IWL_DEBUG_SCAN(mvm,
+ "Measurement scan - too long dwell %hu (max out time %u)\n",
+ req->duration,
+ scan_timing[params->type].max_out_time);
+ return -EOPNOTSUPP;
+ }
+
+ return min_t(u32, (u32)req->duration,
+ scan_timing[params->type].max_out_time);
+}
+
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
@@ -717,22 +739,6 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
}
-static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
- enum iwl_scan_priority_ext prio)
-{
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
- return cpu_to_le32(prio);
-
- if (prio <= IWL_SCAN_PRIORITY_EXT_2)
- return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
-
- if (prio <= IWL_SCAN_PRIORITY_EXT_4)
- return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
-
- return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-}
-
static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_lmac *cmd,
struct iwl_mvm_scan_params *params)
@@ -743,7 +749,7 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
cmd->extended_dwell = scan_timing[params->type].dwell_extended;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
- cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
@@ -1033,21 +1039,24 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
- cmd->extended_dwell = scan_timing[params->type].dwell_extended;
- cmd->active_dwell = scan_timing[params->type].dwell_active;
- cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ if (params->measurement_dwell) {
+ cmd->active_dwell = params->measurement_dwell;
+ cmd->passive_dwell = params->measurement_dwell;
+ cmd->extended_dwell = params->measurement_dwell;
+ } else {
+ cmd->active_dwell = scan_timing[params->type].dwell_active;
+ cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+ cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+ }
cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
- cmd->scan_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
if (iwl_mvm_is_regular_scan(params))
- cmd->ooc_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
else
- cmd->ooc_priority =
- iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1067,11 +1076,11 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
-static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
{
- int flags = 0;
+ u16 flags = 0;
if (params->n_ssids == 0)
flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
@@ -1093,6 +1102,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (!iwl_mvm_is_regular_scan(params))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+ if (params->measurement_dwell)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->scan_iter_notif_enabled)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
@@ -1119,6 +1131,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->fw->ucode_capa.n_scan_channels;
int uid, i;
u32 ssid_bitmap = 0;
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
@@ -1136,8 +1149,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->scan_uid_status[uid] = type;
cmd->uid = cpu_to_le32(uid);
- cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params,
+ cmd->general_flags = cpu_to_le16(iwl_mvm_scan_umac_flags(mvm, params,
vif));
+ cmd->scan_start_mac_id = scan_vif->id;
if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
@@ -1289,6 +1303,12 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_get_scan_type(mvm,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
+ if (ret < 0)
+ return ret;
+
+ params.measurement_dwell = ret;
+
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
@@ -1315,6 +1335,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+ mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
queue_delayed_work(system_wq, &mvm->scan_timeout_dwork,
@@ -1437,9 +1458,12 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
struct cfg80211_scan_info info = {
.aborted = aborted,
+ .scan_start_tsf = mvm->scan_start,
};
+ memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN);
ieee80211_scan_completed(mvm->hw, &info);
+ mvm->scan_vif = NULL;
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
cancel_delayed_work(&mvm->scan_timeout_dwork);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
@@ -1473,6 +1497,8 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
u8 buf[256];
+ mvm->scan_start = le64_to_cpu(notif->start_tsf);
+
IWL_DEBUG_SCAN(mvm,
"UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
notif->status, notif->scanned_channels,
@@ -1485,6 +1511,10 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
ieee80211_sched_scan_results(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
}
+
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+ mvm->scan_start);
}
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 30fc3afe5241..fc771885e383 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -588,9 +588,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
ret);
/* Make sure the SCD wrptr is correctly set before reconfiguring */
- iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
- cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
- ssn, wdg_timeout);
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
/* Update the TID "owner" of the queue */
spin_lock_bh(&mvm->queue_info_lock);
@@ -747,14 +745,14 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
- u8 ac;
+ u8 txq_curr_ac;
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
spin_lock_bh(&mvm->queue_info_lock);
- ac = mvm->queue_info[queue].mac80211_ac;
+ txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
- cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -935,7 +933,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
/* If aggs should be turned back on - do it */
if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
- struct iwl_mvm_add_sta_cmd cmd;
+ struct iwl_mvm_add_sta_cmd cmd = {0};
mvmsta->tid_disable_agg &= ~BIT(tid);
@@ -1299,13 +1297,6 @@ err:
return ret;
}
-int iwl_mvm_update_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
-}
-
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
@@ -1498,9 +1489,31 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* If DQA is supported - the queues can be disabled now */
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
+
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+ /*
+	 * If no traffic has gone through the reserved TXQ, it is
+	 * still marked as IWL_MVM_QUEUE_RESERVED and should be
+	 * manually marked as free again.
+ */
+ spin_lock_bh(&mvm->queue_info_lock);
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ mvm_sta->sta_id, reserved_txq, *status)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return -EINVAL;
+ }
+
+ *status = IWL_MVM_QUEUE_FREE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ }
+
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
/* if associated - we can't remove the AP STA now */
@@ -2030,11 +2043,9 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
- init_timer(&baid_data->session_timer);
- baid_data->session_timer.function =
- iwl_mvm_rx_agg_session_expired;
- baid_data->session_timer.data =
- (unsigned long)&mvm->baid_map[baid];
+ setup_timer(&baid_data->session_timer,
+ iwl_mvm_rx_agg_session_expired,
+ (unsigned long)&mvm->baid_map[baid]);
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_id = mvm_sta->sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 709542bbfce5..e068d5355865 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -473,9 +473,14 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int iwl_mvm_update_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+
+static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
+}
+
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 58fc7b3c711c..63a051be832e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -241,11 +241,8 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
};
u32 cmdid;
- if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
- cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
- PHY_OPS_GROUP, 0);
- else
- cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
+ cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ PHY_OPS_GROUP, 0);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
@@ -261,9 +258,6 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
DTS_MEASUREMENT_NOTIF_WIDE) };
int ret;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
- temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
-
lockdep_assert_held(&mvm->mutex);
iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 8b91544e6220..66957ac12ca4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -346,7 +346,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
rate_idx = info->control.rates[0].idx;
/* if the rate isn't a well known legacy rate, take the lowest one */
- if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY)
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
rate_idx = rate_lowest_index(
&mvm->nvm_data->bands[info->band], sta);
@@ -441,7 +441,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
* one.
* Need to handle this.
*/
- tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
tx_cmd->key[0] = keyconf->hw_key_idx;
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
@@ -490,16 +490,34 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info, __le16 fc)
{
- if (iwl_mvm_is_dqa_supported(mvm)) {
- if (info->control.vif->type == NL80211_IFTYPE_AP &&
- ieee80211_is_probe_resp(fc))
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ return info->hw_queue;
+
+ switch (info->control.vif->type) {
+ case NL80211_IFTYPE_AP:
+ /*
+	 * Handle legacy hostapd as well, where the station may be
+	 * added only after association.
+ */
+ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
- else if (ieee80211_is_mgmt(fc) &&
- info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return info->hw_queue;
+
+ WARN_ON_ONCE(1);
+ return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (ieee80211_is_mgmt(fc))
return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
- }
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return info->hw_queue;
- return info->hw_queue;
+ WARN_ON_ONCE(1);
+ return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ default:
+ WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
+ return -1;
+ }
}
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
@@ -513,6 +531,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue;
+	/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets, which can be sent
+	 * on 2 different types of vifs, P2P & STATION. P2P uses the
+	 * offchannel queue; STATION (HS2.0) uses the auxiliary context of the
+	 * FW, so its packets must be sent on the aux queue.
+ */
+ if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ skb_info->control.vif->type == NL80211_IFTYPE_STATION)
+ IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+
memcpy(&info, skb->cb, sizeof(info));
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
@@ -526,16 +553,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* This holds the amsdu headers length */
skb_info->driver_data[0] = (void *)(uintptr_t)0;
- /*
- * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
- * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
- * queue. STATION (HS2.0) uses the auxiliary context of the FW,
- * and hence needs to be sent on the aux queue
- */
- if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
- info.control.vif->type == NL80211_IFTYPE_STATION)
- IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
-
queue = info.hw_queue;
/*
@@ -560,6 +577,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
sta_id = mvmvif->bcast_sta.sta_id;
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
+ if (queue < 0)
+ return -1;
+
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
@@ -920,9 +940,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
tid = IWL_MAX_TID_COUNT;
}
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
txq_id = mvmsta->tid_data[tid].txq_id;
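+		/* management frames carry no QoS TID */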
+ if (ieee80211_is_mgmt(fc))
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ }
+
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
@@ -1100,9 +1124,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed);
- iwl_mvm_disable_txq(mvm, tid_data->txq_id,
- vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
- CMD_ASYNC);
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ u8 mac80211_ac = tid_to_mac80211_ac[tid];
+
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+ vif->hw_queue[mac80211_ac], tid,
+ CMD_ASYNC);
+ }
tid_data->state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -1580,41 +1608,16 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}
-static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
- struct iwl_mvm_ba_notif *ba_notif,
- struct iwl_mvm_tid_data *tid_data)
-{
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = ba_notif->txed_2_done;
- info->status.ampdu_len = ba_notif->txed;
- iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
- info);
- /* TODO: not accounted if the whole A-MPDU failed */
- info->status.tx_time = tid_data->tx_time;
- info->status.status_driver_data[0] =
- (void *)(uintptr_t)ba_notif->reduced_txp;
- info->status.status_driver_data[1] =
- (void *)(uintptr_t)tid_data->rate_n_flags;
-}
-
-void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ int txq, int index,
+ struct ieee80211_tx_info *ba_info, u32 rate)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct sk_buff *skb;
- int sta_id, tid, freed;
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
-
- sta_id = ba_notif->sta_id;
- tid = ba_notif->tid;
+ int freed;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
tid >= IWL_MAX_TID_COUNT,
@@ -1634,10 +1637,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
- if (tid_data->txq_id != scd_flow) {
+ if (tid_data->txq_id != txq) {
IWL_ERR(mvm,
- "invalid BA notification: Q %d, tid %d, flow %d\n",
- tid_data->txq_id, tid, scd_flow);
+ "invalid BA notification: Q %d, tid %d\n",
+ tid_data->txq_id, tid);
rcu_read_unlock();
return;
}
@@ -1651,27 +1654,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway).
*/
- iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
- &reclaimed_skbs);
-
- IWL_DEBUG_TX_REPLY(mvm,
- "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
- (u8 *)&ba_notif->sta_addr_lo32,
- ba_notif->sta_id);
- IWL_DEBUG_TX_REPLY(mvm,
- "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
- ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
- (unsigned long long)le64_to_cpu(ba_notif->bitmap),
- scd_flow, ba_resp_scd_ssn, ba_notif->txed,
- ba_notif->txed_2_done);
+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
- IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
- ba_notif->reduced_txp);
- tid_data->next_reclaimed = ba_resp_scd_ssn;
+ tid_data->next_reclaimed = index;
iwl_mvm_check_ratid_empty(mvm, sta, tid);
freed = 0;
+ ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -1693,8 +1683,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
- if (freed == 1)
- iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
+ if (freed == 1) {
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ memcpy(&info->status, &ba_info->status,
+ sizeof(ba_info->status));
+ iwl_mvm_hwrate_to_tx_status(rate, info);
+ }
}
spin_unlock_bh(&mvmsta->lock);
@@ -1704,7 +1698,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* Still it's important to update RS about sent vs. acked.
*/
if (skb_queue_empty(&reclaimed_skbs)) {
- struct ieee80211_tx_info ba_info = {};
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
if (mvmsta->vif)
@@ -1714,11 +1707,11 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
- ba_info.band = chanctx_conf->def.chan->band;
- iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
+ ba_info->band = chanctx_conf->def.chan->band;
+ iwl_mvm_hwrate_to_tx_status(rate, ba_info);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
+ iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
}
out:
@@ -1730,6 +1723,92 @@ out:
}
}
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int sta_id, tid, txq, index;
+ struct ieee80211_tx_info ba_info = {};
+ struct iwl_mvm_ba_notif *ba_notif;
+ struct iwl_mvm_tid_data *tid_data;
+ struct iwl_mvm_sta *mvmsta;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_mvm_compressed_ba_notif *ba_res =
+ (void *)pkt->data;
+
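+		/* map the compressed BA notification onto the common ba_info consumed by iwl_mvm_tx_reclaim() */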
+ sta_id = ba_res->sta_id;
+ ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
+ ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
+ ba_info.status.tx_time =
+ (u16)le32_to_cpu(ba_res->wireless_time);
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_res->reduced_txp;
+
+ /*
+ * TODO:
+ * When supporting multi TID aggregations - we need to move
+ * next_reclaimed to be per TXQ and not per TID or handle it
+ * in a different way.
+ * This will go together with SN and AddBA offload and cannot
+ * be handled properly for now.
+ */
+ WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
+ iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
+ (int)ba_res->tfd[0].q_num,
+ le16_to_cpu(ba_res->tfd[0].tfd_index),
+ &ba_info, le32_to_cpu(ba_res->tx_rate));
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+ return;
+ }
+
+ ba_notif = (void *)pkt->data;
+ sta_id = ba_notif->sta_id;
+ tid = ba_notif->tid;
+ /* "flow" corresponds to Tx queue */
+ txq = le16_to_cpu(ba_notif->scd_flow);
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ index = le16_to_cpu(ba_notif->scd_ssn);
+
+ rcu_read_lock();
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ if (WARN_ON_ONCE(!mvmsta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ tid_data = &mvmsta->tid_data[tid];
+
+ ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
+ ba_info.status.ampdu_len = ba_notif->txed;
+ ba_info.status.tx_time = tid_data->tx_time;
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_notif->reduced_txp;
+
+ rcu_read_unlock();
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+ (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+ ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+ le64_to_cpu(ba_notif->bitmap), txq, index,
+ ba_notif->txed, ba_notif->txed_2_done);
+
+ IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+ ba_notif->reduced_txp);
+}
+
/*
* Note that there are transports that buffer frames before they reach
* the firmware. This means that after flush_tx_path is called, the
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 7c138fedcb19..d04babd99b53 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -512,7 +512,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
base = mvm->fw->inst_errlog_ptr;
}
- if (base < 0x800000) {
+ if (base < 0x400000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
@@ -1225,6 +1225,28 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
rcu_read_unlock();
}
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
+{
+ bool ps_disabled;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Disable power save when reading GP2 */
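+	/* (presumably the GP2 register cannot be read reliably while the NIC is in power save) */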
+ ps_disabled = mvm->ps_disabled;
+ if (!ps_disabled) {
+ mvm->ps_disabled = true;
+ iwl_mvm_power_update_device(mvm);
+ }
+
+ *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ *boottime = ktime_get_boot_ns();
+
+ if (!ps_disabled) {
+ mvm->ps_disabled = ps_disabled;
+ iwl_mvm_power_update_device(mvm);
+ }
+}
+
int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
enum iwl_lqm_cmd_operatrions operation,
u32 duration, u32 timeout)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index c6e24fb286be..001be406a3d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -487,6 +487,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
@@ -500,6 +501,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
@@ -523,6 +525,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 11e347dd44c7..cac6d99012b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -37,6 +37,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <linux/cpu.h>
#include "iwl-fh.h"
#include "iwl-csr.h"
@@ -49,7 +50,7 @@
* be needed for potential data in the SKB's head. The remaining ones can
* be used for frags.
*/
-#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
/*
* RX related structures and functions
@@ -192,41 +193,9 @@ struct iwl_cmd_meta {
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
u32 flags;
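+	/* bitmap of TBs that were mapped with dma_map_page() (vs. dma_map_single()); consulted when unmapping */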
+ u32 tbs;
};
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
- * the software buffers (in the variables @meta, @txb in struct
- * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
- * the same struct) have 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_queue {
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
@@ -273,13 +242,32 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @write_ptr: 1-st empty entry (index) host_w
+ * @read_ptr: last used entry (index) host_r
+ * @dma_addr: physical addr for BD's
+ * @n_window: safe queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space more than this
+ * @high_mark: high watermark, stop queue if free space less than this
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
+ * (unless future HW changes require otherwise). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlaid over the HW queue.
*/
struct iwl_txq {
- struct iwl_queue q;
- struct iwl_tfd *tfds;
+ void *tfds;
struct iwl_pcie_first_tb_buf *first_tb_bufs;
dma_addr_t first_tb_dma;
struct iwl_pcie_txq_entry *entries;
@@ -294,6 +282,14 @@ struct iwl_txq {
bool block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
+
+ int write_ptr;
+ int read_ptr;
+ dma_addr_t dma_addr;
+ int n_window;
+ u32 id;
+ int low_mark;
+ int high_mark;
};
static inline dma_addr_t
@@ -309,6 +305,16 @@ struct iwl_tso_hdr_page {
};
/**
+ * enum iwl_shared_irq_flags - level of sharing for irq
+ * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
+ * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
+ */
+enum iwl_shared_irq_flags {
+ IWL_SHARED_IRQ_NON_RX = BIT(0),
+ IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -326,7 +332,6 @@ struct iwl_tso_hdr_page {
* @rx_buf_size: Rx buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
- * @wide_cmd_header: true when ucode supports wide command header format
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
@@ -338,8 +343,10 @@ struct iwl_tso_hdr_page {
* @fw_mon_size: size of the buffer for the firmware monitor
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
- * @allocated_vector: the number of interrupt vector allocated by the OS
- * @default_irq_num: default irq for non rx interrupt
+ * @shared_vec_mask: the type of causes the shared vector handles
+ * (see iwl_shared_irq_flags).
+ * @alloc_vecs: the number of interrupt vectors allocated by the OS
+ * @def_irq: default irq for non rx causes
* @fh_init_mask: initial unmasked fh causes
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
@@ -391,11 +398,12 @@ struct iwl_trans_pcie {
unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+ u8 max_tbs;
+ u16 tfd_size;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
bool scd_set_active;
- bool wide_cmd_header;
bool sw_csum_tx;
u32 rx_page_order;
@@ -410,12 +418,14 @@ struct iwl_trans_pcie {
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
bool msix_enabled;
- u32 allocated_vector;
- u32 default_irq_num;
+ u8 shared_vec_mask;
+ u32 alloc_vecs;
+ u32 def_irq;
u32 fh_init_mask;
u32 hw_init_mask;
u32 fh_mask;
u32 hw_mask;
+ cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
};
static inline struct iwl_trans_pcie *
@@ -474,6 +484,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -486,11 +497,20 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
+ u8 idx)
{
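+	/* TFH TFDs carry an explicit little-endian length; legacy TFDs pack it in the high 12 bits of hi_n_len */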
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->tb_len);
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- return le16_to_cpu(tb->hi_n_len) >> 4;
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+ }
}
/*****************************************************
@@ -617,9 +637,9 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
+ if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
}
@@ -628,22 +648,22 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+ if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->q.id);
+ txq->id);
}
-static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 5c36e6d00622..6fe5546dc773 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -487,15 +487,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
while (pending) {
int i;
- struct list_head local_allocated;
+ LIST_HEAD(local_allocated);
gfp_t gfp_mask = GFP_KERNEL;
/* Do not post a warning if there are only a few requests */
if (pending < RX_PENDING_WATERMARK)
gfp_mask |= __GFP_NOWARN;
- INIT_LIST_HEAD(&local_allocated);
-
for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
struct iwl_rx_mem_buffer *rxb;
struct page *page;
@@ -1108,13 +1106,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
FH_RSCSR_RXQ_POS != rxq->id);
IWL_DEBUG_RX(trans,
- "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
+ "cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
rxcb._offset,
iwl_get_cmd_string(trans,
iwl_cmd_id(pkt->hdr.cmd,
pkt->hdr.group_id,
0)),
- pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));
+ pkt->hdr.group_id, pkt->hdr.cmd,
+ le16_to_cpu(pkt->hdr.sequence));
len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
@@ -1142,7 +1141,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
sequence = le16_to_cpu(pkt->hdr.sequence);
index = SEQ_TO_INDEX(sequence);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
if (rxq->id == 0)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
@@ -1885,6 +1884,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
inta_fh,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
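+	/* if the default vector is shared, it also services the RX queue(s) mapped to it */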
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q0) {
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, 0);
+ local_bh_enable();
+ }
+
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q1) {
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, 1);
+ local_bh_enable();
+ }
+
/* This "Tx" DMA channel is used only for loading uCode */
if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 2f46eedd7c4d..ae95533e587d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1170,7 +1170,7 @@ static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
if (trans_pcie->msix_enabled) {
int i;
- for (i = 0; i < trans_pcie->allocated_vector; i++)
+ for (i = 0; i < trans_pcie->alloc_vecs; i++)
synchronize_irq(trans_pcie->msix_entries[i].vector);
} else {
synchronize_irq(trans_pcie->pci_dev->irq);
@@ -1429,13 +1429,58 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+ int i;
+
+ /*
+	 * Access all non-RX causes and map them to the default irq.
+ * In case we are missing at least one interrupt vector,
+ * the first interrupt vector will serve non-RX and FBQ causes.
+ */
+ for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+ iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+ iwl_clear_bit(trans, causes_list[i].mask_reg,
+ causes_list[i].cause_num);
+ }
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 offset =
+ trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+ u32 val, idx;
+
+ /*
+ * The first RX queue - fallback queue, which is designated for
+ * management frame, command responses etc, is always mapped to the
+ * first interrupt vector. The other RX queues are mapped to
+ * the other (N - 2) interrupt vectors.
+ */
+ val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+ for (idx = 1; idx < trans->num_rx_queues; idx++) {
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+ MSIX_FH_INT_CAUSES_Q(idx - offset));
+ val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+ }
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+ val = MSIX_FH_INT_CAUSES_Q(0);
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+ val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
+
static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
- u32 val, max_rx_vector, i;
struct iwl_trans *trans = trans_pcie->trans;
- max_rx_vector = trans_pcie->allocated_vector - 1;
-
if (!trans_pcie->msix_enabled) {
if (trans->cfg->mq_rx_supported)
iwl_write_prph(trans, UREG_CHICK,
@@ -1446,25 +1491,16 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/*
- * Each cause from the list above and the RX causes is represented as
- * a byte in the IVAR table. We access the first (N - 1) bytes and map
- * them to the (N - 1) vectors so these vectors will be used as rx
- * vectors. Then access all non rx causes and map them to the
- * default queue (N'th queue).
+ * Each cause from the causes list above and the RX causes is
+	 * represented as a byte in the IVAR table. The first nibble holds
+	 * the interrupt vector the cause is bound to; the second nibble
+	 * marks the cause as non-auto-clear, which is set when that vector
+	 * also serves other causes.
*/
- for (i = 0; i < max_rx_vector; i++) {
- iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i));
- iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD,
- BIT(MSIX_FH_INT_CAUSES_Q(i)));
- }
+ iwl_pcie_map_rx_causes(trans);
+
+ iwl_pcie_map_non_rx_causes(trans);
- for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
- val = trans_pcie->default_irq_num |
- MSIX_NON_AUTO_CLEAR_CAUSE;
- iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
- iwl_clear_bit(trans, causes_list[i].mask_reg,
- causes_list[i].cause_num);
- }
trans_pcie->fh_init_mask =
~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
trans_pcie->fh_mask = trans_pcie->fh_init_mask;
@@ -1477,40 +1513,55 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int max_irqs, num_irqs, i, ret, nr_online_cpus;
u16 pci_cmd;
- int max_vector;
- int ret, i;
-
- if (trans->cfg->mq_rx_supported) {
- max_vector = min_t(u32, (num_possible_cpus() + 2),
- IWL_MAX_RX_HW_QUEUES);
- for (i = 0; i < max_vector; i++)
- trans_pcie->msix_entries[i].entry = i;
-
- ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
- MSIX_MIN_INTERRUPT_VECTORS,
- max_vector);
- if (ret > 1) {
- IWL_DEBUG_INFO(trans,
- "Enable MSI-X allocate %d interrupt vector\n",
- ret);
- trans_pcie->allocated_vector = ret;
- trans_pcie->default_irq_num =
- trans_pcie->allocated_vector - 1;
- trans_pcie->trans->num_rx_queues =
- trans_pcie->allocated_vector - 1;
- trans_pcie->msix_enabled = true;
-
- return;
- }
+
+ if (!trans->cfg->mq_rx_supported)
+ goto enable_msi;
+
+ nr_online_cpus = num_online_cpus();
+ max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+ for (i = 0; i < max_irqs; i++)
+ trans_pcie->msix_entries[i].entry = i;
+
+ num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
+ MSIX_MIN_INTERRUPT_VECTORS,
+ max_irqs);
+ if (num_irqs < 0) {
IWL_DEBUG_INFO(trans,
- "ret = %d %s move to msi mode\n", ret,
- (ret == 1) ?
- "can't allocate more than 1 interrupt vector" :
- "failed to enable msi-x mode");
- pci_disable_msix(pdev);
+ "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
+ num_irqs);
+ goto enable_msi;
+ }
+ trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
+
+ IWL_DEBUG_INFO(trans,
+ "MSI-X enabled. %d interrupt vectors were allocated\n",
+ num_irqs);
+
+ /*
+ * In case the OS provides fewer interrupts than requested, different
+ * causes will share the same interrupt vector as follows:
+	 * One interrupt less: non-RX causes shared with FBQ.
+	 * Two interrupts less: non-RX causes shared with FBQ and RSS.
+ * More than two interrupts: we will use fewer RSS queues.
+ */
+ if (num_irqs <= nr_online_cpus) {
+ trans_pcie->trans->num_rx_queues = num_irqs + 1;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
+ IWL_SHARED_IRQ_FIRST_RSS;
+ } else if (num_irqs == nr_online_cpus + 1) {
+ trans_pcie->trans->num_rx_queues = num_irqs;
+ trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
+ } else {
+ trans_pcie->trans->num_rx_queues = num_irqs - 1;
}
+ trans_pcie->alloc_vecs = num_irqs;
+ trans_pcie->msix_enabled = true;
+ return;
+
+enable_msi:
ret = pci_enable_msi(pdev);
if (ret) {
dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
@@ -1523,36 +1574,57 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
}
}
+static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
+{
+ int iter_rx_q, i, ret, cpu, offset;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
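+	/* skip vector 0 unless it also serves the first RSS queue; pin each remaining RX vector to one online CPU */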
+ i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
+ iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
+ offset = 1 + i;
+	for (; i < iter_rx_q; i++) {
+ /*
+		 * cpumask_next() returns the first online CPU strictly after
+		 * its first argument, so this walks the online CPUs in order.
+ */
+ cpu = cpumask_next(i - offset, cpu_online_mask);
+ cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
+ ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
+ &trans_pcie->affinity_mask[i]);
+ if (ret)
+ IWL_ERR(trans_pcie->trans,
+ "Failed to set affinity mask for IRQ %d\n",
+ i);
+ }
+}
+
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie)
{
- int i, last_vector;
-
- last_vector = trans_pcie->trans->num_rx_queues;
+ int i;
- for (i = 0; i < trans_pcie->allocated_vector; i++) {
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
int ret;
-
- ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
- iwl_pcie_msix_isr,
- (i == last_vector) ?
- iwl_pcie_irq_msix_handler :
- iwl_pcie_irq_rx_msix_handler,
- IRQF_SHARED,
- DRV_NAME,
- &trans_pcie->msix_entries[i]);
+ struct msix_entry *msix_entry;
+
+ msix_entry = &trans_pcie->msix_entries[i];
+ ret = devm_request_threaded_irq(&pdev->dev,
+ msix_entry->vector,
+ iwl_pcie_msix_isr,
+ (i == trans_pcie->def_irq) ?
+ iwl_pcie_irq_msix_handler :
+ iwl_pcie_irq_rx_msix_handler,
+ IRQF_SHARED,
+ DRV_NAME,
+ msix_entry);
if (ret) {
- int j;
-
IWL_ERR(trans_pcie->trans,
"Error allocating IRQ %d\n", i);
- for (j = 0; j < i; j++)
- free_irq(trans_pcie->msix_entries[j].vector,
- &trans_pcie->msix_entries[j]);
- pci_disable_msix(pdev);
+
return ret;
}
}
+ iwl_pcie_irq_set_affinity(trans_pcie->trans);
return 0;
}
@@ -1681,7 +1753,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
- trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
@@ -1712,22 +1783,16 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_rx_free(trans);
if (trans_pcie->msix_enabled) {
- for (i = 0; i < trans_pcie->allocated_vector; i++)
- free_irq(trans_pcie->msix_entries[i].vector,
- &trans_pcie->msix_entries[i]);
+ for (i = 0; i < trans_pcie->alloc_vecs; i++) {
+ irq_set_affinity_hint(
+ trans_pcie->msix_entries[i].vector,
+ NULL);
+ }
- pci_disable_msix(trans_pcie->pci_dev);
trans_pcie->msix_enabled = false;
} else {
- free_irq(trans_pcie->pci_dev->irq, trans);
-
iwl_pcie_free_ict(trans);
-
- pci_disable_msi(trans_pcie->pci_dev);
}
- iounmap(trans_pcie->hw_base);
- pci_release_regions(trans_pcie->pci_dev);
- pci_disable_device(trans_pcie->pci_dev);
iwl_pcie_free_fw_monitor(trans);
@@ -1899,7 +1964,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
txq->frozen = freeze;
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
goto next_queue;
if (freeze) {
@@ -1947,7 +2012,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
txq->block--;
if (!txq->block) {
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (i << 8));
+ txq->write_ptr | (i << 8));
}
} else if (block) {
txq->block++;
@@ -1967,14 +2032,14 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
int cnt;
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->q.read_ptr, txq->q.write_ptr);
+ txq->read_ptr, txq->write_ptr);
if (trans->cfg->use_tfh)
/* TODO: access new SCD registers and dump them */
return;
scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ SCD_TX_STTS_QUEUE_OFFSET(txq->id);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
@@ -2009,7 +2074,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
int ret = 0;
@@ -2027,13 +2091,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
- q = &txq->q;
- wr_ptr = ACCESS_ONCE(q->write_ptr);
+ wr_ptr = ACCESS_ONCE(txq->write_ptr);
- while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+ u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
@@ -2042,7 +2105,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
usleep_range(1000, 2000);
}
- if (q->read_ptr != q->write_ptr) {
+ if (txq->read_ptr != txq->write_ptr) {
IWL_ERR(trans,
"fail to flush all tx fifo queues Q %d\n", cnt);
ret = -ETIMEDOUT;
@@ -2210,7 +2273,6 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
- struct iwl_queue *q;
char *buf;
int pos = 0;
int cnt;
@@ -2228,10 +2290,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
txq = &trans_pcie->txq[cnt];
- q = &txq->q;
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
- cnt, q->read_ptr, q->write_ptr,
+ cnt, txq->read_ptr, txq->write_ptr,
!!test_bit(cnt, trans_pcie->queue_used),
!!test_bit(cnt, trans_pcie->queue_stopped),
txq->need_update, txq->frozen,
@@ -2437,13 +2498,14 @@ err:
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
-static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < IWL_NUM_OF_TBS; i++)
- cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+ for (i = 0; i < trans_pcie->max_tbs; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
}
@@ -2658,7 +2720,7 @@ static struct iwl_trans_dump_data
/* host commands */
len += sizeof(*data) +
- cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+ cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
if (trans_pcie->fw_mon_page) {
@@ -2726,12 +2788,13 @@ static struct iwl_trans_dump_data
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
spin_lock_bh(&cmdq->lock);
- ptr = cmdq->q.write_ptr;
- for (i = 0; i < cmdq->q.n_window; i++) {
- u8 idx = get_cmd_index(&cmdq->q, ptr);
+ ptr = cmdq->write_ptr;
+ for (i = 0; i < cmdq->n_window; i++) {
+ u8 idx = get_cmd_index(cmdq, ptr);
u32 caplen, cmdlen;
- cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
+ trans_pcie->tfd_size * ptr);
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
if (cmdlen) {
@@ -2801,6 +2864,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
+ .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table,
+
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
@@ -2834,13 +2899,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans *trans;
int ret, addr_size;
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ERR_PTR(ret);
+
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
if (!trans)
return ERR_PTR(-ENOMEM);
- trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
-
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->trans = trans;
@@ -2854,9 +2921,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
- ret = pci_enable_device(pdev);
- if (ret)
- goto out_no_pci;
if (!cfg->base_params->pcie_l1_allowed) {
/*
@@ -2874,6 +2938,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
else
addr_size = 36;
+ if (cfg->use_tfh) {
+ trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
+ trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
+	} else {
+ trans_pcie->max_tbs = IWL_NUM_OF_TBS;
+ trans_pcie->tfd_size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
+
pci_set_master(pdev);
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
@@ -2888,21 +2962,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* both attempts failed: */
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
- goto out_pci_disable_device;
+ goto out_no_pci;
}
}
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
if (ret) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
- goto out_pci_disable_device;
+ dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
+ goto out_no_pci;
}
- trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+ trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
if (!trans_pcie->hw_base) {
- dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ dev_err(&pdev->dev, "pcim_iomap_table failed\n");
ret = -ENODEV;
- goto out_pci_release_regions;
+ goto out_no_pci;
}
/* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -2929,7 +3003,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
ret = iwl_pcie_prepare_card_hw(trans);
if (ret) {
IWL_WARN(trans, "Exit HW not ready\n");
- goto out_pci_disable_msi;
+ goto out_no_pci;
}
/*
@@ -2946,7 +3020,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
25000);
if (ret < 0) {
IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
- goto out_pci_disable_msi;
+ goto out_no_pci;
}
if (iwl_trans_grab_nic_access(trans, &flags)) {
@@ -2978,15 +3052,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (trans_pcie->msix_enabled) {
if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
- goto out_pci_release_regions;
+ goto out_no_pci;
} else {
ret = iwl_pcie_alloc_ict(trans);
if (ret)
- goto out_pci_disable_msi;
+ goto out_no_pci;
- ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans);
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
if (ret) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
@@ -3004,12 +3079,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
iwl_pcie_free_ict(trans);
-out_pci_disable_msi:
- pci_disable_msi(pdev);
-out_pci_release_regions:
- pci_release_regions(pdev);
-out_pci_disable_device:
- pci_disable_device(pdev);
out_no_pci:
free_percpu(trans_pcie->tso_hdr_page);
iwl_trans_free(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 9636dc89f6bd..e9a278b60dfd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,7 +71,7 @@
*
***************************************************/
-static int iwl_queue_space(const struct iwl_queue *q)
+static int iwl_queue_space(const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
@@ -102,7 +102,7 @@ static int iwl_queue_space(const struct iwl_queue *q)
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
{
q->n_window = slots_num;
q->id = id;
@@ -158,13 +158,13 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
spin_lock(&txq->lock);
/* check if triggered erroneously */
- if (txq->q.read_ptr == txq->q.write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_unlock(&txq->lock);
return;
}
spin_unlock(&txq->lock);
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
jiffies_to_msecs(txq->wd_timeout));
iwl_trans_pcie_log_scd_error(trans, txq);
@@ -176,22 +176,21 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
* iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt)
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
u8 sec_ctl = 0;
- u8 sta_id = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *) txq->entries[txq->q.write_ptr].cmd->payload;
+ (void *)txq->entries[txq->write_ptr].cmd->payload;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- sta_id = tx_cmd->sta_id;
sec_ctl = tx_cmd->sec_ctl;
switch (sec_ctl & TX_CMD_SEC_MSK) {
@@ -205,14 +204,32 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
-
if (trans_pcie->bc_table_dword)
len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
return;
- bc_ent = cpu_to_le16(len | (sta_id << 12));
+ if (trans->cfg->use_tfh) {
+ u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+		 * filled_tfd_size contains the number of filled bytes in the
+		 * TFD. Dividing it by 64 gives the number of chunks to fetch
+		 * to SRAM - 0 for one chunk, 1 for two, and so on.
+		 * If, for example, the TFD contains only 3 TBs, then 32 bytes
+		 * of the TFD are used and only one 64-byte chunk should be
+		 * fetched.
+ */
+ u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ } else {
+ u8 sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+ }
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -227,12 +244,12 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
+ int txq_id = txq->id;
+ int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+ (void *)txq->entries[read_ptr].cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@@ -240,6 +257,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
@@ -255,7 +273,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
- int txq_id = txq->q.id;
+ int txq_id = txq->id;
lockdep_assert_held(&txq->lock);
@@ -289,10 +307,10 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* if not in power-save mode, uCode will never sleep when we're
* trying to tx (during RFKILL, we're not trying to tx).
*/
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
if (!txq->block)
iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ txq->write_ptr | (txq_id << 8));
}
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
@@ -312,49 +330,93 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, int idx)
{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ return txq->tfds + trans_pcie->tfd_size * idx;
+}
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
- return addr;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return (dma_addr_t)(le64_to_cpu(tb->addr));
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ dma_addr_t hi_len;
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+ }
}
-static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+ u8 idx, dma_addr_t addr, u16 len)
{
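+	/* write the buffer address and length in whichever TFD layout the device uses, then bump num_tbs */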
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd_fh->num_tbs = cpu_to_le16(idx + 1);
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+ struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+ u16 hi_n_len = len << 4;
- tb->hi_n_len = cpu_to_le16(hi_n_len);
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
- tfd->num_tbs = idx + 1;
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd_fh->num_tbs = idx + 1;
+ }
}
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
- return tfd->num_tbs & 0x1f;
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ } else {
+ struct iwl_tfd *tfd = _tfd;
+
+ return tfd->num_tbs & 0x1f;
+ }
}
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd)
+ struct iwl_txq *txq, int index)
{
- int i;
- int num_tbs;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
/* Sanity check on number of chunks */
- num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- if (num_tbs >= IWL_NUM_OF_TBS) {
+ if (num_tbs >= trans_pcie->max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
/* @todo issue fatal error, it is quite a serious situation */
return;
@@ -363,18 +425,30 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
/* first TB is never freed - it's the bidirectional DMA data */
for (i = 1; i < num_tbs; i++) {
- if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+ if (meta->tbs & BIT(i))
dma_unmap_page(trans->dev,
- iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
+ iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+ iwl_pcie_tfd_tb_get_len(trans, tfd, i),
DMA_TO_DEVICE);
else
dma_unmap_single(trans->dev,
- iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i),
+ iwl_pcie_tfd_tb_get_addr(trans, tfd,
+ i),
+ iwl_pcie_tfd_tb_get_len(trans, tfd,
+ i),
DMA_TO_DEVICE);
}
- tfd->num_tbs = 0;
+
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ }
}
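The unmap path now consults a dedicated per-TB bitmap (meta->tbs) instead of bits packed into meta->flags, choosing dma_unmap_page() for fragment buffers and dma_unmap_single() for linear ones. A hedged userspace sketch of that bookkeeping, with the dma_* calls stubbed out as prints:

	#include <stdint.h>
	#include <stdio.h>

	struct demo_meta {
		uint16_t tbs;		/* bit i set: TB i was mapped as a page */
	};

	static void demo_mark_page_tb(struct demo_meta *m, int idx)
	{
		m->tbs |= 1u << idx;	/* set at map time, tested at unmap */
	}

	static void demo_unmap(const struct demo_meta *m, int num_tbs)
	{
		/* TB 0 is skipped in the driver: it holds the bidirectional data */
		for (int i = 1; i < num_tbs; i++) {
			if (m->tbs & (1u << i))
				printf("TB %d: dma_unmap_page()\n", i);
			else
				printf("TB %d: dma_unmap_single()\n", i);
		}
	}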
/*
@@ -388,20 +462,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
*/
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_tfd *tfd_tmp = txq->tfds;
-
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
*/
- int rd_ptr = txq->q.read_ptr;
- int idx = get_cmd_index(&txq->q, rd_ptr);
+ int rd_ptr = txq->read_ptr;
+ int idx = get_cmd_index(txq, rd_ptr);
lockdep_assert_held(&txq->lock);
/* We have only txq->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
if (txq->entries) {
@@ -423,23 +495,21 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ void *tfd;
u32 num_tbs;
- q = &txq->q;
- tfd_tmp = txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
+ tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
if (reset)
- memset(tfd, 0, sizeof(*tfd));
+ memset(tfd, 0, trans_pcie->tfd_size);
- num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
- /* Each TFD can point to a maximum 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
+ /* Each TFD can point to a maximum of max_tbs Tx buffers */
+ if (num_tbs >= trans_pcie->max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
+ trans_pcie->max_tbs);
return -EINVAL;
}
@@ -447,7 +517,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
"Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+ iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
return num_tbs;
}
@@ -457,7 +527,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
u32 txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
size_t tb0_buf_sz;
int i;
@@ -468,7 +538,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
(unsigned long)txq);
txq->trans_pcie = trans_pcie;
- txq->q.n_window = slots_num;
+ txq->n_window = slots_num;
txq->entries = kcalloc(slots_num,
sizeof(struct iwl_pcie_txq_entry),
@@ -489,7 +559,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->q.dma_addr, GFP_KERNEL);
+ &txq->dma_addr, GFP_KERNEL);
if (!txq->tfds)
goto error;
@@ -503,11 +573,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->first_tb_bufs)
goto err_free_tfds;
- txq->q.id = txq_id;
+ txq->id = txq_id;
return 0;
err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
if (txq->entries && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
@@ -531,7 +601,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, slots_num, txq_id);
+ ret = iwl_queue_init(txq, slots_num, txq_id);
if (ret)
return ret;
@@ -545,10 +615,10 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
return 0;
}
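The BUILD_BUG_ON above pins TFD_QUEUE_SIZE_MAX to a power of two; that is what lets the ring's increment/decrement helpers wrap with a mask instead of a modulo. A small sketch of the idiom (the mask form of iwl_queue_inc_wrap() is an assumption based on that check, not quoted from the driver):

	#include <assert.h>
	#include <stdio.h>

	#define DEMO_QUEUE_SIZE 256	/* must satisfy (n & (n - 1)) == 0 */

	static int demo_inc_wrap(int index)
	{
		return (index + 1) & (DEMO_QUEUE_SIZE - 1);	/* cheap wrap */
	}

	int main(void)
	{
		/* runtime guard, same property BUILD_BUG_ON checks at build */
		assert((DEMO_QUEUE_SIZE & (DEMO_QUEUE_SIZE - 1)) == 0);
		printf("%d -> %d\n", DEMO_QUEUE_SIZE - 1,
		       demo_inc_wrap(DEMO_QUEUE_SIZE - 1));	/* 255 -> 0 */
		return 0;
	}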
@@ -595,15 +665,14 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
spin_lock_bh(&txq->lock);
- while (q->write_ptr != q->read_ptr) {
+ while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, q->read_ptr);
+ txq_id, txq->read_ptr);
if (txq_id != trans_pcie->cmd_queue) {
- struct sk_buff *skb = txq->entries[q->read_ptr].skb;
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -611,15 +680,15 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
if (txq_id != trans_pcie->cmd_queue) {
IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
- q->id);
+ txq->id);
iwl_trans_unref(trans);
} else {
iwl_pcie_clear_cmd_in_flight(trans);
@@ -663,7 +732,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc array of command/tx buffers */
if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < txq->q.n_window; i++) {
+ for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
}
@@ -671,13 +740,13 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
- txq->tfds, txq->q.dma_addr);
- txq->q.dma_addr = 0;
+ trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->dma_addr);
+ txq->dma_addr = 0;
txq->tfds = NULL;
dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->q.n_window,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
txq->first_tb_bufs, txq->first_tb_dma);
}
@@ -761,14 +830,14 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
if (trans->cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr);
+ txq->dma_addr);
else
iwl_write_direct32(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
- txq->q.dma_addr >> 8);
+ txq->dma_addr >> 8);
iwl_pcie_txq_unmap(trans, txq_id);
- txq->q.read_ptr = 0;
- txq->q.write_ptr = 0;
+ txq->read_ptr = 0;
+ txq->write_ptr = 0;
}
/* Tell NIC where to find the "keep warm" buffer */
@@ -1012,7 +1081,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
* if empty delete timer, otherwise move timer forward
* since we're making progress on this queue
*/
- if (txq->q.read_ptr == txq->q.write_ptr)
+ if (txq->read_ptr == txq->write_ptr)
del_timer(&txq->stuck_timer);
else
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
@@ -1025,7 +1094,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
- struct iwl_queue *q = &txq->q;
int last_to_free;
/* This function is not meant to release cmd queue */
@@ -1040,21 +1108,21 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
- if (txq->q.read_ptr == tfd_num)
+ if (txq->read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->q.read_ptr, tfd_num, ssn);
+ txq_id, txq->read_ptr, tfd_num, ssn);
/* Since we free up to index, _not_ inclusive, the entry just before it
* is the last we will free. That entry must be in use */
last_to_free = iwl_queue_dec_wrap(tfd_num);
- if (!iwl_queue_used(q, last_to_free)) {
+ if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1062,9 +1130,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
- q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
- struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
+ txq->read_ptr != tfd_num;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+ struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
@@ -1073,16 +1141,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
- txq->entries[txq->q.read_ptr].skb = NULL;
+ txq->entries[txq->read_ptr].skb = NULL;
- iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+ if (!trans->cfg->use_tfh)
+ iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_free_tfd(trans, txq);
}
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ if (iwl_queue_space(txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1114,12 +1183,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ if (iwl_queue_space(txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
- if (q->read_ptr == q->write_ptr) {
- IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+ if (txq->read_ptr == txq->write_ptr) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
iwl_trans_unref(trans);
}
@@ -1181,31 +1250,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
- if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
- q->write_ptr, q->read_ptr);
+ txq->write_ptr, txq->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+ for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
+ txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, q->write_ptr, q->read_ptr);
+ idx, txq->write_ptr, txq->read_ptr);
iwl_force_nmi(trans);
}
}
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1291,14 +1359,14 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
*/
iwl_scd_txq_disable_agg(trans, txq_id);
- ssn = txq->q.read_ptr;
+ ssn = txq->read_ptr;
}
}
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- txq->q.read_ptr = (ssn & 0xff);
- txq->q.write_ptr = (ssn & 0xff);
+ txq->read_ptr = (ssn & 0xff);
+ txq->write_ptr = (ssn & 0xff);
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(ssn & 0xff) | (txq_id << 8));
@@ -1351,6 +1419,14 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
txq->ampdu = !shared_mode;
}
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->scd_bc_tbls.dma +
+ txq * sizeof(struct iwlagn_scd_bc_tbl);
+}
+
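The new exported helper above locates one queue's scheduler byte-count table inside the single DMA allocation holding the tables for every queue: the base address plus the queue index times the per-queue table size. A trivial sketch of that arithmetic with hypothetical names:

	#include <stddef.h>
	#include <stdint.h>

	typedef uint64_t demo_dma_addr_t;

	/* One allocation, one fixed-size table per TX queue. */
	static demo_dma_addr_t demo_byte_table(demo_dma_addr_t base,
					       size_t table_size, int txq)
	{
		return base + (demo_dma_addr_t)txq * table_size;
	}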
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
bool configure_scd)
{
@@ -1406,7 +1482,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
@@ -1421,7 +1496,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- if (WARN(!trans_pcie->wide_cmd_header &&
+ if (WARN(!trans->wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
"unsupported wide command %#x\n", cmd->id))
return -EINVAL;
@@ -1505,7 +1580,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1514,7 +1589,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- idx = get_cmd_index(q, q->write_ptr);
+ idx = get_cmd_index(txq, txq->write_ptr);
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
@@ -1533,7 +1608,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
copy_size = sizeof(struct iwl_cmd_header_wide);
@@ -1541,7 +1616,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
+ INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
cmd_pos = sizeof(struct iwl_cmd_header);
@@ -1591,7 +1666,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1607,8 +1682,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1631,8 +1706,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
phys_addr = dma_map_single(trans->dev, (void *)data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1640,8 +1715,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
- BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
- sizeof(out_meta->flags) * BITS_PER_BYTE);
+ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
out_meta->flags = cmd->flags;
if (WARN_ON_ONCE(txq->entries[idx].free_buf))
kzfree(txq->entries[idx].free_buf);
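The reworked BUILD_BUG_ON above checks that the tbs bitmap in struct iwl_cmd_meta is wide enough to carry one bit per possible TB. In plain C11 the equivalent compile-time guard is a _Static_assert; the value 25 for the TB count below is an assumption for illustration only:

	#include <stdint.h>

	#define DEMO_TFH_NUM_TBS 25	/* assumed, for illustration only */

	struct demo_meta {
		uint32_t tbs;		/* one bit per transfer buffer */
	};

	_Static_assert(DEMO_TFH_NUM_TBS <=
		       sizeof(((struct demo_meta *)0)->tbs) * 8,
		       "tbs bitmap narrower than the TB count");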
@@ -1650,7 +1724,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
@@ -1662,7 +1736,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1700,20 +1774,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+ trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
spin_lock_bh(&txq->lock);
- cmd_index = get_cmd_index(&txq->q, index);
+ cmd_index = get_cmd_index(txq, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
- iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+ iwl_pcie_tfd_unmap(trans, meta, txq, index);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -1826,14 +1900,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
HOST_COMPLETE_TIMEOUT);
if (!ret) {
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
iwl_get_cmd_string(trans, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
+ txq->read_ptr, txq->write_ptr);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
@@ -1911,7 +1984,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
- struct iwl_queue *q = &txq->q;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 tb2_len;
int i;
@@ -1926,8 +1999,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
@@ -1946,19 +2019,19 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
return -EINVAL;
}
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
- out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+ out_meta->tbs |= BIT(tb_idx);
}
trace_iwlwifi_dev_tx(trans->dev, skb,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
@@ -2019,7 +2092,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- struct iwl_queue *q = &txq->q;
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
@@ -2033,8 +2105,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
+ iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
NULL, 0);
@@ -2190,7 +2262,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
return 0;
out_unmap:
- iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+ iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
return ret;
}
#else /* CONFIG_INET */
@@ -2214,9 +2286,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
- struct iwl_queue *q;
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
+ void *tfd;
u16 len, tb1_len;
bool wait_write_ptr;
__le16 fc;
@@ -2225,7 +2297,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
bool amsdu;
txq = &trans_pcie->txq[txq_id];
- q = &txq->q;
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
@@ -2247,7 +2318,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+ skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
__skb_linearize(skb))
return -ENOMEM;
@@ -2260,11 +2331,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(q) < q->high_mark) {
+ if (iwl_queue_space(txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(q) < 3)) {
+ if (unlikely(iwl_queue_space(txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2285,19 +2356,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
WARN_ONCE(txq->ampdu &&
- (wifi_seq & 0xff) != q->write_ptr,
+ (wifi_seq & 0xff) != txq->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
- txq_id, wifi_seq, q->write_ptr);
+ txq_id, wifi_seq, txq->write_ptr);
/* Set up driver data for this TFD */
- txq->entries[q->write_ptr].skb = skb;
- txq->entries[q->write_ptr].cmd = dev_cmd;
+ txq->entries[txq->write_ptr].skb = skb;
+ txq->entries[txq->write_ptr].cmd = dev_cmd;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
+ INDEX_TO_SEQ(txq->write_ptr)));
- tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
+ tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
@@ -2305,7 +2376,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
/* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[q->write_ptr].meta;
+ out_meta = &txq->entries[txq->write_ptr].meta;
out_meta->flags = 0;
/*
@@ -2330,7 +2401,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
IWL_FIRST_TB_SIZE, true);
@@ -2355,13 +2426,15 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
}
+ tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+ iwl_pcie_tfd_get_num_tbs(trans, tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr) {
+ if (txq->read_ptr == txq->write_ptr) {
if (txq->wd_timeout) {
/*
* If the TXQ is active, then set the timer, if not,
@@ -2375,12 +2448,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
else
txq->frozen_expiry_remainder = txq->wd_timeout;
}
- IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+ IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
iwl_trans_ref(trans);
}
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index c7f2faa81921..39ce76ad00bc 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -484,6 +484,29 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
}
/*
+ * CFG802.11 operation handler to set default mgmt key.
+ */
+static int
+mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *netdev,
+ u8 key_index)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ struct mwifiex_ds_encrypt_key encrypt_key;
+
+ wiphy_dbg(wiphy, "set default mgmt key, key index=%d\n", key_index);
+
+ memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
+ encrypt_key.key_len = WLAN_KEY_LEN_CCMP;
+ encrypt_key.key_index = key_index;
+ encrypt_key.is_igtk_def_key = true;
+ eth_broadcast_addr(encrypt_key.mac_addr);
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET, true, &encrypt_key, true);
+}
+
+/*
* This function sends domain information to the firmware.
*
* The following information are passed to the firmware -
@@ -4082,6 +4105,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.leave_ibss = mwifiex_cfg80211_leave_ibss,
.add_key = mwifiex_cfg80211_add_key,
.del_key = mwifiex_cfg80211_del_key,
+ .set_default_mgmt_key = mwifiex_cfg80211_set_default_mgmt_key,
.mgmt_tx = mwifiex_cfg80211_mgmt_tx,
.mgmt_frame_register = mwifiex_cfg80211_mgmt_frame_register,
.remain_on_channel = mwifiex_cfg80211_remain_on_channel,
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 18aa5256e88b..4b1894b4757f 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -78,6 +78,7 @@ enum KEY_TYPE_ID {
KEY_TYPE_ID_AES,
KEY_TYPE_ID_WAPI,
KEY_TYPE_ID_AES_CMAC,
+ KEY_TYPE_ID_AES_CMAC_DEF,
};
#define WPA_PN_SIZE 8
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 70429815ff53..536ab834b126 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -260,6 +260,7 @@ struct mwifiex_ds_encrypt_key {
u8 is_igtk_key;
u8 is_current_wep_key;
u8 is_rx_seq_valid;
+ u8 is_igtk_def_key;
};
struct mwifiex_power_cfg {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 9b2e98cfe090..2478ccd6f2d9 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1369,12 +1369,12 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
struct mwifiex_private *priv;
int i;
+ if (!adapter)
+ goto exit_return;
+
if (down_interruptible(sem))
goto exit_sem_err;
- if (!adapter)
- goto exit_remove;
-
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
mwifiex_deauthenticate(priv, NULL);
@@ -1430,10 +1430,10 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
rtnl_unlock();
}
-exit_remove:
up(sem);
exit_sem_err:
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+exit_return:
return 0;
}
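The reorder above moves the NULL check ahead of down_interruptible(), so the bail-out path never holds the semaphore and the old exit_remove unlock label can go away. A hedged POSIX sketch of the same ordering rule (names invented):

	#include <semaphore.h>
	#include <stddef.h>

	struct demo_adapter { int dummy; };

	static int demo_shutdown(struct demo_adapter *adapter, sem_t *sem)
	{
		if (!adapter)		/* validate before taking the lock */
			return 0;

		if (sem_wait(sem))	/* interrupted: nothing to release */
			return -1;

		/* ... teardown that needs the semaphore held ... */

		sem_post(sem);		/* single unlock path */
		return 0;
	}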
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 49048b41fd36..2a162c33d271 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -598,6 +598,11 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
memcpy(km->key_param_set.key_params.cmac_aes.key,
enc_key->key_material, enc_key->key_len);
len += sizeof(struct mwifiex_cmac_aes_param);
+ } else if (enc_key->is_igtk_def_key) {
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set CMAC default Key index\n", __func__);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC_DEF;
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
} else {
mwifiex_dbg(adapter, INFO,
"%s: Set AES Key\n", __func__);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 3344a26bed03..8548027abf71 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1049,8 +1049,10 @@ mwifiex_create_custom_regdomain(struct mwifiex_private *priv,
enum nl80211_band band;
chan = *buf++;
- if (!chan)
+ if (!chan) {
+ kfree(regd);
return NULL;
+ }
chflags = *buf++;
band = (chan <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
freq = ieee80211_channel_to_frequency(chan, band);
@@ -1116,6 +1118,7 @@ static int mwifiex_ret_chan_region_cfg(struct mwifiex_private *priv,
u16 action = le16_to_cpu(reg->action);
u16 tlv, tlv_buf_len, tlv_buf_left;
struct mwifiex_ie_types_header *head;
+ struct ieee80211_regdomain *regd;
u8 *tlv_buf;
if (action != HostCmd_ACT_GEN_GET)
@@ -1137,10 +1140,10 @@ static int mwifiex_ret_chan_region_cfg(struct mwifiex_private *priv,
mwifiex_dbg_dump(priv->adapter, CMD_D, "CHAN:",
(u8 *)head + sizeof(*head),
tlv_buf_len);
- priv->adapter->regd =
- mwifiex_create_custom_regdomain(priv,
- (u8 *)head +
- sizeof(*head), tlv_buf_len);
+ regd = mwifiex_create_custom_regdomain(priv,
+ (u8 *)head + sizeof(*head), tlv_buf_len);
+ if (!IS_ERR(regd))
+ priv->adapter->regd = regd;
break;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 8a20620fa119..73eb0846db21 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -273,6 +273,8 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
} else {
mwifiex_dbg(adapter, DATA,
"%s: DATA\n", __func__);
+ mwifiex_write_data_complete(adapter, context->skb, 0,
+ urb->status ? -1 : 0);
for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
port = &card->port[i];
if (context->ep == port->tx_data_ep) {
@@ -282,8 +284,6 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
}
}
adapter->data_sent = false;
- mwifiex_write_data_complete(adapter, context->skb, 0,
- urb->status ? -1 : 0);
}
if (card->mc_resync_flag)
@@ -841,7 +841,7 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
struct usb_tx_data_port *port = NULL;
u8 *data = (u8 *)skb->data;
struct urb *tx_urb;
- int idx, ret;
+ int idx, ret = -EINPROGRESS;
if (adapter->is_suspended) {
mwifiex_dbg(adapter, ERROR,
@@ -865,8 +865,9 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
if (atomic_read(&port->tx_data_urb_pending)
>= MWIFIEX_TX_DATA_URB) {
port->block_status = true;
- ret = -EBUSY;
- goto done;
+ adapter->data_sent =
+ mwifiex_usb_data_sent(adapter);
+ return -EBUSY;
}
if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
port->tx_data_ix = 0;
@@ -897,6 +898,14 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
else
atomic_inc(&port->tx_data_urb_pending);
+ if (ep != card->tx_cmd_ep &&
+ atomic_read(&port->tx_data_urb_pending) ==
+ MWIFIEX_TX_DATA_URB) {
+ port->block_status = true;
+ adapter->data_sent = mwifiex_usb_data_sent(adapter);
+ ret = -ENOSR;
+ }
+
if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
mwifiex_dbg(adapter, ERROR,
"%s: usb_submit_urb failed\n", __func__);
@@ -905,29 +914,15 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
} else {
atomic_dec(&port->tx_data_urb_pending);
port->block_status = false;
+ adapter->data_sent = false;
if (port->tx_data_ix)
port->tx_data_ix--;
else
port->tx_data_ix = MWIFIEX_TX_DATA_URB;
}
-
- return -1;
- } else {
- if (ep != card->tx_cmd_ep &&
- atomic_read(&port->tx_data_urb_pending) ==
- MWIFIEX_TX_DATA_URB) {
- port->block_status = true;
- ret = -ENOSR;
- goto done;
- }
+ ret = -1;
}
- return -EINPROGRESS;
-
-done:
- if (ep != card->tx_cmd_ep)
- adapter->data_sent = mwifiex_usb_data_sent(adapter);
-
return ret;
}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 1f54b8928d3c..1016628926d2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -29,6 +29,7 @@
#define RTL8XXXU_DEBUG_H2C 0x800
#define RTL8XXXU_DEBUG_ACTION 0x1000
#define RTL8XXXU_DEBUG_EFUSE 0x2000
+#define RTL8XXXU_DEBUG_INTERRUPT 0x4000
#define RTW_USB_CONTROL_MSG_TIMEOUT 500
#define RTL8XXXU_MAX_REG_POLL 500
@@ -1318,7 +1319,7 @@ struct rtl8xxxu_fileops {
int (*power_on) (struct rtl8xxxu_priv *priv);
void (*power_off) (struct rtl8xxxu_priv *priv);
void (*reset_8051) (struct rtl8xxxu_priv *priv);
- int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page);
+ int (*llt_init) (struct rtl8xxxu_priv *priv);
void (*init_phy_bb) (struct rtl8xxxu_priv *priv);
int (*init_phy_rf) (struct rtl8xxxu_priv *priv);
void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
@@ -1400,14 +1401,14 @@ int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name);
void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv);
void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv);
void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv);
-int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start);
int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv);
int rtl8xxxu_gen2_h2c_cmd(struct rtl8xxxu_priv *priv,
struct h2c_cmd *h2c, int len);
int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv);
void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv);
-int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 841522ee6379..df54d27e7851 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1396,6 +1396,114 @@ exit:
return ret;
}
+static int rtl8192eu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int retry, retval;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ retry = 100;
+ retval = -EBUSY;
+ /*
+ * Poll the 32-bit register 0x05f8 for 0x00000000 to ensure no TX is pending.
+ */
+ do {
+ val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD);
+ if (!val32) {
+ retval = 0;
+ break;
+ }
+ } while (retry--);
+
+ if (retval) {
+ dev_warn(dev, "Failed to flush TX queue\n");
+ goto out;
+ }
+
+ /* Disable CCK and OFDM, clock gated */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ udelay(2);
+
+ /* Reset whole BB */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BB_GLB_RSTN;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ /* Reset MAC TRX */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 &= 0xff00;
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 &= ~CR_SECURITY_ENABLE;
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+ val8 |= DUAL_TSF_TX_OK;
+ rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+out:
+ return retval;
+}
+
+static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ int count, ret = 0;
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+ /* Switch DPDT_SEL_P output from register 0x65[2] */
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 &= ~LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+ /* 0x0005[1] = 1 turn off MAC by HW state machine */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ if ((val8 & BIT(1)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!count) {
+ dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+ __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
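rtl8192eu_active_to_emu() above uses the usual bounded-poll idiom: re-read a status bit up to RTL8XXXU_MAX_REG_POLL times with a short delay, and fail with -EBUSY if it never clears. A hedged userspace sketch of the helper shape (the callback and all names are invented):

	#include <errno.h>
	#include <stdint.h>
	#include <unistd.h>

	static int demo_poll_bit_clear(uint8_t (*read8)(void *ctx), void *ctx,
				       uint8_t mask, int tries,
				       unsigned int delay_us)
	{
		while (tries--) {
			if (!(read8(ctx) & mask))
				return 0;	/* bit cleared in time */
			usleep(delay_us);
		}
		return -EBUSY;		/* hardware never acknowledged */
	}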
+static int rtl8192eu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* 0x04[12:11] = 01 enable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ return 0;
+}
+
static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
{
u16 val16;
@@ -1446,6 +1554,40 @@ exit:
return ret;
}
+static void rtl8192eu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+ rtl8192eu_active_to_lps(priv);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ rtl8xxxu_reset_8051(priv);
+
+ rtl8192eu_active_to_emu(priv);
+ rtl8192eu_emu_to_disabled(priv);
+}
+
static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
{
u32 val32;
@@ -1487,7 +1629,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.parse_efuse = rtl8192eu_parse_efuse,
.load_firmware = rtl8192eu_load_firmware,
.power_on = rtl8192eu_power_on,
- .power_off = rtl8xxxu_power_off,
+ .power_off = rtl8192eu_power_off,
.reset_8051 = rtl8xxxu_reset_8051,
.llt_init = rtl8xxxu_auto_llt_table,
.init_phy_bb = rtl8192eu_init_phy_bb,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index ca92022f9d50..b2d7f6e69667 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -2472,10 +2472,13 @@ static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
return ret;
}
-int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv)
{
int ret;
int i;
+ u8 last_tx_page;
+
+ last_tx_page = priv->fops->total_page_num;
for (i = 0; i < last_tx_page; i++) {
ret = rtl8xxxu_llt_write(priv, i, i + 1);
@@ -2503,7 +2506,7 @@ exit:
return ret;
}
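The loop in rtl8xxxu_init_llt_table() writes entry i with the value i + 1, chaining the TX buffer pages into a singly linked list the hardware walks. A runnable sketch of the structure it builds (the page count is made up for the demo):

	#include <stdio.h>

	#define DEMO_LAST_TX_PAGE 8	/* assumed small value for the demo */

	int main(void)
	{
		unsigned int llt[DEMO_LAST_TX_PAGE + 1];

		/* each page points at the next, mirroring llt_write(i, i + 1) */
		for (unsigned int i = 0; i < DEMO_LAST_TX_PAGE; i++)
			llt[i] = i + 1;

		for (unsigned int i = 0; i < DEMO_LAST_TX_PAGE; i++)
			printf("page %u -> page %u\n", i, llt[i]);
		return 0;
	}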
-int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv)
{
u32 val32;
int ret = 0;
@@ -3869,7 +3872,7 @@ static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);
rtl8xxxu_write32(priv, REG_RQPN_NPQ, val32);
- pubq = fops->total_page_num - hq - lq - nq;
+ pubq = fops->total_page_num - hq - lq - nq - 1;
val32 = RQPN_LOAD;
val32 |= (hq << RQPN_HI_PQ_SHIFT);
@@ -3883,6 +3886,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_fileops *fops = priv->fops;
bool macpower;
int ret;
u8 val8;
@@ -3901,7 +3905,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
else
macpower = true;
- ret = priv->fops->power_on(priv);
+ ret = fops->power_on(priv);
if (ret < 0) {
dev_warn(dev, "%s: Failed power on\n", __func__);
goto exit;
@@ -3918,7 +3922,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Set RX page boundary
*/
- rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
+ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);
ret = rtl8xxxu_download_firmware(priv);
dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
@@ -3929,8 +3933,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
- if (priv->fops->phy_init_antenna_selection)
- priv->fops->phy_init_antenna_selection(priv);
+ if (fops->phy_init_antenna_selection)
+ fops->phy_init_antenna_selection(priv);
ret = rtl8xxxu_init_mac(priv);
@@ -3943,7 +3947,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
- ret = priv->fops->init_phy_rf(priv);
+ ret = fops->init_phy_rf(priv);
if (ret)
goto exit;
@@ -3968,7 +3972,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Set TX buffer boundary
*/
- val8 = priv->fops->total_page_num + 1;
+ val8 = fops->total_page_num + 1;
rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
@@ -3981,14 +3985,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* The vendor drivers set PBP for all devices, except 8192e.
* There is no explanation for this in any of the sources.
*/
- val8 = (priv->fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
- (priv->fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
+ val8 = (fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
+ (fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
if (priv->rtl_chip != RTL8192E)
rtl8xxxu_write8(priv, REG_PBP, val8);
dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
if (!macpower) {
- ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
+ ret = fops->llt_init(priv);
if (ret) {
dev_warn(dev, "%s: LLT table init failed\n", __func__);
goto exit;
@@ -3997,12 +4001,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Chip specific quirks
*/
- priv->fops->usb_quirks(priv);
+ fops->usb_quirks(priv);
/*
* Enable TX report and TX report timer for 8723bu/8188eu/...
*/
- if (priv->fops->has_tx_report) {
+ if (fops->has_tx_report) {
val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
@@ -4137,8 +4141,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
}
- if (priv->fops->init_aggregation)
- priv->fops->init_aggregation(priv);
+ if (fops->init_aggregation)
+ fops->init_aggregation(priv);
/*
* Enable CCK and OFDM block
@@ -4155,7 +4159,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Start out with default power levels for channel 6, 20MHz
*/
- priv->fops->set_tx_power(priv, 1, false);
+ fops->set_tx_power(priv, 1, false);
/* Let the 8051 take control of antenna setting */
if (priv->rtl_chip != RTL8192E) {
@@ -4171,8 +4175,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
- if (priv->fops->init_statistics)
- priv->fops->init_statistics(priv);
+ if (fops->init_statistics)
+ fops->init_statistics(priv);
if (priv->rtl_chip == RTL8192E) {
/*
@@ -4190,12 +4194,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8723a_phy_lc_calibrate(priv);
- priv->fops->phy_iq_calibrate(priv);
+ fops->phy_iq_calibrate(priv);
/*
* This should enable thermal meter
*/
- if (priv->fops->gen2_thermal_meter)
+ if (fops->gen2_thermal_meter)
rtl8xxxu_write_rfreg(priv,
RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
else
@@ -5371,7 +5375,8 @@ static void rtl8xxxu_int_complete(struct urb *urb)
struct device *dev = &priv->udev->dev;
int ret;
- dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_INTERRUPT)
+ dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
if (urb->status == 0) {
usb_anchor_urb(urb, &priv->int_anchor);
ret = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 3555a2f24285..315ccfb2dff5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -676,6 +676,7 @@
#define REG_SCH_TXCMD 0x05d0
/* define REG_FW_TSF_SYNC_CNT 0x04a0 */
+#define REG_SCH_TX_CMD 0x05f8
#define REG_FW_RESET_TSF_CNT_1 0x05fc
#define REG_FW_RESET_TSF_CNT_0 0x05fd
#define REG_FW_BCN_DIS_CNT 0x05fe
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 7aee5ebb147d..f95760c13c56 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -765,7 +765,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
mac->bw_40 = false;
mac->bw_80 = false;
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ channel_type);
break;
}
}
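The string changes through the rest of this series all apply one pattern: the default: arm of a switch logs the unhandled selector value, so a report pinpoints which case was missed instead of printing a bare "not processed". A minimal sketch:

	#include <stdio.h>

	static void demo_handle(unsigned int sel)
	{
		switch (sel) {
		case 0:
			puts("handled");
			break;
		default:
			/* include the selector so the log identifies the miss */
			printf("switch case %#x not processed\n", sel);
			break;
		}
	}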
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index d12586d4f845..0dfa9eac3926 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -179,7 +179,8 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ rtlpci->const_support_pciaspm);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 9a64f9b703e5..18d979affc18 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -151,7 +151,7 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", state_toset);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index 629125658b87..5360d5332359 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -334,7 +334,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
@@ -405,7 +405,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 4ab6201daf1a..37d6efc3d240 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -355,9 +355,11 @@ void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((u64 *)(val)) = tsf;
break; }
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -571,7 +573,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -735,7 +738,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -2352,7 +2355,7 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
index b504bd092fc4..f05c2c674165 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
@@ -62,7 +62,7 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -100,7 +100,7 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 7498a1218cba..fffaa92eda81 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -1346,7 +1346,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -2128,7 +2129,7 @@ bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2166,7 +2167,8 @@ static void rtl88e_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -2319,7 +2321,7 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index 43fcb25c885f..7d152466152b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -352,7 +352,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -456,7 +456,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 60ab2ec4f4ef..27e3d5f9ca34 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -910,7 +910,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1567,7 +1568,7 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -1605,7 +1606,8 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 244607951e28..a47be73a0980 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -141,9 +141,11 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -367,7 +369,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -2154,7 +2157,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
index 8283e9b27639..24e483ba3fa4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
@@ -62,7 +62,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -97,7 +97,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index 1ee5a6ae9960..46d0d945f283 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -300,12 +300,9 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
default:
break;
@@ -554,7 +551,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 8789752f8143..ae8f055483fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1560,7 +1560,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -1931,7 +1931,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
index 75a2deb23af1..8514ab652520 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
@@ -62,7 +62,7 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -95,7 +95,7 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index c972fa50926d..4b2976465905 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -277,12 +277,9 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
default:
break;
@@ -517,7 +514,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 62ef8209718f..8de29cc3ced0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -435,7 +435,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
@@ -512,7 +512,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
bwrite_success = true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 57205514801c..d91f8bbfe7a0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -164,9 +164,11 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_INT_AC:
*((bool *)(val)) = rtlpriv->dm.disable_tx_int;
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -361,7 +363,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -502,7 +505,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -2171,7 +2174,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index 76a57ae4af3e..811ba57eb9bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -71,7 +71,7 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -106,7 +106,7 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 2a4810d32182..2a1edfd21b96 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -836,12 +836,9 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -2850,7 +2847,8 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
break;
@@ -2963,7 +2961,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -2994,7 +2993,7 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -3182,7 +3181,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 0708eedd9671..b3f6a9ed15d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -344,7 +344,7 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -433,7 +433,7 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index b07af8d15273..ebf663e1a81a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -338,9 +338,11 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((u64 *)(val)) = tsf;
}
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -566,7 +568,8 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -685,7 +688,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -2463,7 +2466,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
index 8388e371c8e2..47da05dd3076 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
@@ -61,7 +61,7 @@ void rtl92ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -91,7 +91,7 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index beafc9a10ad8..5ad7e753c357 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -1927,7 +1927,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -3001,7 +3002,7 @@ bool rtl92ee_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -3041,7 +3042,8 @@ static void rtl92ee_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -3187,7 +3189,7 @@ static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index ddfa0aee5bf8..52e4430edb54 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -77,9 +77,11 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((bool *)(val)) = rtlpriv->dm.current_mrc_switch;
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default: {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -297,7 +299,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -433,7 +436,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", variable);
break;
}
@@ -2465,7 +2468,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
index 44949b5cbb87..9849cb988186 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
@@ -68,7 +68,7 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -104,7 +104,7 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 881821f4e243..4bb75581ab38 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -442,7 +442,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -648,7 +649,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not processed\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index b7c0d38ee5b5..1186755e55b8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -124,7 +124,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -230,7 +230,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index ba30efc2d195..f8be0bd7e326 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -141,9 +141,11 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -366,7 +368,8 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -546,7 +549,7 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -2225,7 +2228,7 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
index 13173351cbfd..c7be9342136c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
@@ -63,7 +63,7 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -105,7 +105,7 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 601b78efedfb..17b58cb32d55 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -1023,7 +1023,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1499,7 +1500,7 @@ bool rtl8723e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -1536,7 +1537,8 @@ static void rtl8723e_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -1682,7 +1684,7 @@ static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index d5da0f3c1217..8c5c27ce8e05 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -122,7 +122,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -195,7 +195,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 82e4476cab23..aba60c3145c5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -348,9 +348,11 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((u64 *)(val)) = tsf;
}
break;
+ case HAL_DEF_WOWLAN:
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -607,7 +609,8 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -723,8 +726,7 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n",
- variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -2565,7 +2567,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
index 4196efb723a2..497913eb3b37 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
@@ -58,7 +58,7 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -100,7 +100,7 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 285818df149b..3cc2232f25ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -837,7 +837,7 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -1507,7 +1507,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -2515,7 +2516,7 @@ bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2553,7 +2554,8 @@ static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -2705,7 +2707,7 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index a4fc70e8c9c0..b665446351a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -392,7 +392,7 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -481,7 +481,7 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", cmd_len);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 0cddf1ad0fff..1281ebe0c30a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -480,7 +480,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -671,7 +671,8 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
@@ -800,7 +801,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break; }
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process %x\n", variable);
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -3934,7 +3935,7 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
index ba1946a0280e..fcb3b28c6b8f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
@@ -60,7 +60,7 @@ void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -133,7 +133,7 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case not process\n");
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index a71bfe38e7e1..5dad402171c2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -2063,12 +2063,9 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
}
break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -2133,16 +2130,10 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
break;
case RF90_PATH_B:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_C:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
- break;
case RF90_PATH_D:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpath);
break;
}
return true;
@@ -4670,7 +4661,7 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -4714,7 +4705,8 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
@@ -4820,7 +4812,7 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "switch case not process\n");
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index c5086c2229aa..595f7d5d091a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -394,110 +394,110 @@ enum io_type {
};
enum hw_variables {
- HW_VAR_ETHER_ADDR,
- HW_VAR_MULTICAST_REG,
- HW_VAR_BASIC_RATE,
- HW_VAR_BSSID,
- HW_VAR_MEDIA_STATUS,
- HW_VAR_SECURITY_CONF,
- HW_VAR_BEACON_INTERVAL,
- HW_VAR_ATIM_WINDOW,
- HW_VAR_LISTEN_INTERVAL,
- HW_VAR_CS_COUNTER,
- HW_VAR_DEFAULTKEY0,
- HW_VAR_DEFAULTKEY1,
- HW_VAR_DEFAULTKEY2,
- HW_VAR_DEFAULTKEY3,
- HW_VAR_SIFS,
- HW_VAR_R2T_SIFS,
- HW_VAR_DIFS,
- HW_VAR_EIFS,
- HW_VAR_SLOT_TIME,
- HW_VAR_ACK_PREAMBLE,
- HW_VAR_CW_CONFIG,
- HW_VAR_CW_VALUES,
- HW_VAR_RATE_FALLBACK_CONTROL,
- HW_VAR_CONTENTION_WINDOW,
- HW_VAR_RETRY_COUNT,
- HW_VAR_TR_SWITCH,
- HW_VAR_COMMAND,
- HW_VAR_WPA_CONFIG,
- HW_VAR_AMPDU_MIN_SPACE,
- HW_VAR_SHORTGI_DENSITY,
- HW_VAR_AMPDU_FACTOR,
- HW_VAR_MCS_RATE_AVAILABLE,
- HW_VAR_AC_PARAM,
- HW_VAR_ACM_CTRL,
- HW_VAR_DIS_Req_Qsize,
- HW_VAR_CCX_CHNL_LOAD,
- HW_VAR_CCX_NOISE_HISTOGRAM,
- HW_VAR_CCX_CLM_NHM,
- HW_VAR_TxOPLimit,
- HW_VAR_TURBO_MODE,
- HW_VAR_RF_STATE,
- HW_VAR_RF_OFF_BY_HW,
- HW_VAR_BUS_SPEED,
- HW_VAR_SET_DEV_POWER,
-
- HW_VAR_RCR,
- HW_VAR_RATR_0,
- HW_VAR_RRSR,
- HW_VAR_CPU_RST,
- HW_VAR_CHECK_BSSID,
- HW_VAR_LBK_MODE,
- HW_VAR_AES_11N_FIX,
- HW_VAR_USB_RX_AGGR,
- HW_VAR_USER_CONTROL_TURBO_MODE,
- HW_VAR_RETRY_LIMIT,
- HW_VAR_INIT_TX_RATE,
- HW_VAR_TX_RATE_REG,
- HW_VAR_EFUSE_USAGE,
- HW_VAR_EFUSE_BYTES,
- HW_VAR_AUTOLOAD_STATUS,
- HW_VAR_RF_2R_DISABLE,
- HW_VAR_SET_RPWM,
- HW_VAR_H2C_FW_PWRMODE,
- HW_VAR_H2C_FW_JOINBSSRPT,
- HW_VAR_H2C_FW_MEDIASTATUSRPT,
- HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
- HW_VAR_FW_PSMODE_STATUS,
- HW_VAR_INIT_RTS_RATE,
- HW_VAR_RESUME_CLK_ON,
- HW_VAR_FW_LPS_ACTION,
- HW_VAR_1X1_RECV_COMBINE,
- HW_VAR_STOP_SEND_BEACON,
- HW_VAR_TSF_TIMER,
- HW_VAR_IO_CMD,
-
- HW_VAR_RF_RECOVERY,
- HW_VAR_H2C_FW_UPDATE_GTK,
- HW_VAR_WF_MASK,
- HW_VAR_WF_CRC,
- HW_VAR_WF_IS_MAC_ADDR,
- HW_VAR_H2C_FW_OFFLOAD,
- HW_VAR_RESET_WFCRC,
-
- HW_VAR_HANDLE_FW_C2H,
- HW_VAR_DL_FW_RSVD_PAGE,
- HW_VAR_AID,
- HW_VAR_HW_SEQ_ENABLE,
- HW_VAR_CORRECT_TSF,
- HW_VAR_BCN_VALID,
- HW_VAR_FWLPS_RF_ON,
- HW_VAR_DUAL_TSF_RST,
- HW_VAR_SWITCH_EPHY_WoWLAN,
- HW_VAR_INT_MIGRATION,
- HW_VAR_INT_AC,
- HW_VAR_RF_TIMING,
-
- HAL_DEF_WOWLAN,
- HW_VAR_MRC,
- HW_VAR_KEEP_ALIVE,
- HW_VAR_NAV_UPPER,
-
- HW_VAR_MGT_FILTER,
- HW_VAR_CTRL_FILTER,
- HW_VAR_DATA_FILTER,
+ HW_VAR_ETHER_ADDR = 0x0,
+ HW_VAR_MULTICAST_REG = 0x1,
+ HW_VAR_BASIC_RATE = 0x2,
+ HW_VAR_BSSID = 0x3,
+ HW_VAR_MEDIA_STATUS = 0x4,
+ HW_VAR_SECURITY_CONF = 0x5,
+ HW_VAR_BEACON_INTERVAL = 0x6,
+ HW_VAR_ATIM_WINDOW = 0x7,
+ HW_VAR_LISTEN_INTERVAL = 0x8,
+ HW_VAR_CS_COUNTER = 0x9,
+ HW_VAR_DEFAULTKEY0 = 0xa,
+ HW_VAR_DEFAULTKEY1 = 0xb,
+ HW_VAR_DEFAULTKEY2 = 0xc,
+ HW_VAR_DEFAULTKEY3 = 0xd,
+ HW_VAR_SIFS = 0xe,
+ HW_VAR_R2T_SIFS = 0xf,
+ HW_VAR_DIFS = 0x10,
+ HW_VAR_EIFS = 0x11,
+ HW_VAR_SLOT_TIME = 0x12,
+ HW_VAR_ACK_PREAMBLE = 0x13,
+ HW_VAR_CW_CONFIG = 0x14,
+ HW_VAR_CW_VALUES = 0x15,
+ HW_VAR_RATE_FALLBACK_CONTROL = 0x16,
+ HW_VAR_CONTENTION_WINDOW = 0x17,
+ HW_VAR_RETRY_COUNT = 0x18,
+ HW_VAR_TR_SWITCH = 0x19,
+ HW_VAR_COMMAND = 0x1a,
+ HW_VAR_WPA_CONFIG = 0x1b,
+ HW_VAR_AMPDU_MIN_SPACE = 0x1c,
+ HW_VAR_SHORTGI_DENSITY = 0x1d,
+ HW_VAR_AMPDU_FACTOR = 0x1e,
+ HW_VAR_MCS_RATE_AVAILABLE = 0x1f,
+ HW_VAR_AC_PARAM = 0x20,
+ HW_VAR_ACM_CTRL = 0x21,
+ HW_VAR_DIS_Req_Qsize = 0x22,
+ HW_VAR_CCX_CHNL_LOAD = 0x23,
+ HW_VAR_CCX_NOISE_HISTOGRAM = 0x24,
+ HW_VAR_CCX_CLM_NHM = 0x25,
+ HW_VAR_TxOPLimit = 0x26,
+ HW_VAR_TURBO_MODE = 0x27,
+ HW_VAR_RF_STATE = 0x28,
+ HW_VAR_RF_OFF_BY_HW = 0x29,
+ HW_VAR_BUS_SPEED = 0x2a,
+ HW_VAR_SET_DEV_POWER = 0x2b,
+
+ HW_VAR_RCR = 0x2c,
+ HW_VAR_RATR_0 = 0x2d,
+ HW_VAR_RRSR = 0x2e,
+ HW_VAR_CPU_RST = 0x2f,
+ HW_VAR_CHECK_BSSID = 0x30,
+ HW_VAR_LBK_MODE = 0x31,
+ HW_VAR_AES_11N_FIX = 0x32,
+ HW_VAR_USB_RX_AGGR = 0x33,
+ HW_VAR_USER_CONTROL_TURBO_MODE = 0x34,
+ HW_VAR_RETRY_LIMIT = 0x35,
+ HW_VAR_INIT_TX_RATE = 0x36,
+ HW_VAR_TX_RATE_REG = 0x37,
+ HW_VAR_EFUSE_USAGE = 0x38,
+ HW_VAR_EFUSE_BYTES = 0x39,
+ HW_VAR_AUTOLOAD_STATUS = 0x3a,
+ HW_VAR_RF_2R_DISABLE = 0x3b,
+ HW_VAR_SET_RPWM = 0x3c,
+ HW_VAR_H2C_FW_PWRMODE = 0x3d,
+ HW_VAR_H2C_FW_JOINBSSRPT = 0x3e,
+ HW_VAR_H2C_FW_MEDIASTATUSRPT = 0x3f,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD = 0x40,
+ HW_VAR_FW_PSMODE_STATUS = 0x41,
+ HW_VAR_INIT_RTS_RATE = 0x42,
+ HW_VAR_RESUME_CLK_ON = 0x43,
+ HW_VAR_FW_LPS_ACTION = 0x44,
+ HW_VAR_1X1_RECV_COMBINE = 0x45,
+ HW_VAR_STOP_SEND_BEACON = 0x46,
+ HW_VAR_TSF_TIMER = 0x47,
+ HW_VAR_IO_CMD = 0x48,
+
+ HW_VAR_RF_RECOVERY = 0x49,
+ HW_VAR_H2C_FW_UPDATE_GTK = 0x4a,
+ HW_VAR_WF_MASK = 0x4b,
+ HW_VAR_WF_CRC = 0x4c,
+ HW_VAR_WF_IS_MAC_ADDR = 0x4d,
+ HW_VAR_H2C_FW_OFFLOAD = 0x4e,
+ HW_VAR_RESET_WFCRC = 0x4f,
+
+ HW_VAR_HANDLE_FW_C2H = 0x50,
+ HW_VAR_DL_FW_RSVD_PAGE = 0x51,
+ HW_VAR_AID = 0x52,
+ HW_VAR_HW_SEQ_ENABLE = 0x53,
+ HW_VAR_CORRECT_TSF = 0x54,
+ HW_VAR_BCN_VALID = 0x55,
+ HW_VAR_FWLPS_RF_ON = 0x56,
+ HW_VAR_DUAL_TSF_RST = 0x57,
+ HW_VAR_SWITCH_EPHY_WoWLAN = 0x58,
+ HW_VAR_INT_MIGRATION = 0x59,
+ HW_VAR_INT_AC = 0x5a,
+ HW_VAR_RF_TIMING = 0x5b,
+
+ HAL_DEF_WOWLAN = 0x5c,
+ HW_VAR_MRC = 0x5d,
+ HW_VAR_KEEP_ALIVE = 0x5e,
+ HW_VAR_NAV_UPPER = 0x5f,
+
+ HW_VAR_MGT_FILTER = 0x60,
+ HW_VAR_CTRL_FILTER = 0x61,
+ HW_VAR_DATA_FILTER = 0x62,
};
enum rt_media_status {
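
[Editor's note] Pinning every hw_variables constant to an explicit value is what makes the new %#x messages decodable: a logged 0x5c always maps back to HAL_DEF_WOWLAN, regardless of entries added later (new members must then take fresh numbers rather than renumbering the rest). A runnable illustration using a few of the constants above:

#include <stdio.h>

/* With pinned values, the number in a log line maps to exactly
 * one enumerator across kernel versions. */
enum hw_variables {
    HW_VAR_ETHER_ADDR = 0x0,
    HW_VAR_SLOT_TIME = 0x12,
    HAL_DEF_WOWLAN = 0x5c,
};

int main(void)
{
    printf("switch case %#x not processed\n",
           (unsigned int)HAL_DEF_WOWLAN);   /* prints 0x5c */
    return 0;
}
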
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 00a04dfc03d1..06d6943b257c 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1397,25 +1397,24 @@ out:
return ret;
}
-#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
-
static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
- struct wl18xx_priv_conf *priv_conf)
+ struct wl18xx_priv_conf *priv_conf,
+ const char *file)
{
struct wlcore_conf_file *conf_file;
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL18XX_CONF_FILE_NAME, dev);
+ ret = request_firmware(&fw, file, dev);
if (ret < 0) {
wl1271_error("could not get configuration binary %s: %d",
- WL18XX_CONF_FILE_NAME, ret);
+ file, ret);
return ret;
}
if (fw->size != WL18XX_CONF_SIZE) {
- wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
- WL18XX_CONF_SIZE, fw->size);
+ wl1271_error("%s configuration binary size is wrong, expected %zu got %zu",
+ file, WL18XX_CONF_SIZE, fw->size);
ret = -EINVAL;
goto out_release;
}
@@ -1448,9 +1447,12 @@ out_release:
static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
{
+ struct platform_device *pdev = wl->pdev;
+ struct wlcore_platdev_data *pdata = dev_get_platdata(&pdev->dev);
struct wl18xx_priv *priv = wl->priv;
- if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf) < 0) {
+ if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf,
+ pdata->family->cfg_name) < 0) {
wl1271_warning("falling back to default config");
/* apply driver default configuration */
@@ -2141,4 +2143,3 @@ MODULE_PARM_DESC(num_rx_desc_param,
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL18XX_FW_NAME);
-MODULE_FIRMWARE(WL18XX_CONF_FILE_NAME);
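
[Editor's note] With the filename hoisted into a parameter, the loader itself becomes family-agnostic; only the caller knows which conf binary to ask for, and the error messages now name the file involved. A hedged sketch of the shape (kernel context; EXPECTED_SIZE and the parse step are placeholders, not wlcore symbols):

#include <linux/firmware.h>
#include <linux/device.h>

#define EXPECTED_SIZE 0x2000    /* placeholder, not WL18XX_CONF_SIZE */

/* Loader parameterized by file name, as wl18xx_load_conf_file()
 * is after this change. */
static int load_conf(struct device *dev, const char *file)
{
    const struct firmware *fw;
    int ret;

    ret = request_firmware(&fw, file, dev);
    if (ret < 0) {
        dev_err(dev, "could not get %s: %d\n", file, ret);
        return ret;
    }
    if (fw->size != EXPECTED_SIZE) {
        dev_err(dev, "%s: bad size %zu\n", file, fw->size);
        release_firmware(fw);
        return -EINVAL;
    }
    /* parse fw->data here (placeholder) */
    release_firmware(fw);
    return 0;
}
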
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index f75d30444117..f00509ea8aca 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -282,6 +282,9 @@ EXPORT_SYMBOL_GPL(wlcore_boot_upload_firmware);
int wlcore_boot_upload_nvs(struct wl1271 *wl)
{
+ struct platform_device *pdev = wl->pdev;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
+ const char *nvs_name = "unknown";
size_t nvs_len, burst_len;
int i;
u32 dest_addr, val;
@@ -293,6 +296,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
return -ENODEV;
}
+ if (pdev_data && pdev_data->family)
+ nvs_name = pdev_data->family->nvs_name;
+
if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
struct wl1271_nvs_file *nvs =
(struct wl1271_nvs_file *)wl->nvs;
@@ -310,8 +316,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
(wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
wl->enable_11a)) {
- wl1271_error("nvs size is not as expected: %zu != %zu",
- wl->nvs_len, sizeof(struct wl1271_nvs_file));
+ wl1271_error("%s size is not as expected: %zu != %zu",
+ nvs_name, wl->nvs_len,
+ sizeof(struct wl1271_nvs_file));
kfree(wl->nvs);
wl->nvs = NULL;
wl->nvs_len = 0;
@@ -328,8 +335,8 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
if (nvs->general_params.dual_mode_select)
wl->enable_11a = true;
} else {
- wl1271_error("nvs size is not as expected: %zu != %zu",
- wl->nvs_len,
+ wl1271_error("%s size is not as expected: %zu != %zu",
+ nvs_name, wl->nvs_len,
sizeof(struct wl128x_nvs_file));
kfree(wl->nvs);
wl->nvs = NULL;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ef6c15b952cc..471521a0db7b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6413,9 +6413,12 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
goto out;
}
wl->nvs_len = fw->size;
- } else {
+ } else if (pdev_data->family->nvs_name) {
wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
- WL12XX_NVS_NAME);
+ pdev_data->family->nvs_name);
+ wl->nvs = NULL;
+ wl->nvs_len = 0;
+ } else {
wl->nvs = NULL;
wl->nvs_len = 0;
}
@@ -6510,21 +6513,29 @@ out:
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
- int ret;
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
+ const char *nvs_name;
+ int ret = 0;
- if (!wl->ops || !wl->ptable)
+ if (!wl->ops || !wl->ptable || !pdev_data)
return -EINVAL;
wl->dev = &pdev->dev;
wl->pdev = pdev;
platform_set_drvdata(pdev, wl);
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
- wl, wlcore_nvs_cb);
- if (ret < 0) {
- wl1271_error("request_firmware_nowait failed: %d", ret);
- complete_all(&wl->nvs_loading_complete);
+ if (pdev_data->family && pdev_data->family->nvs_name) {
+ nvs_name = pdev_data->family->nvs_name;
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ nvs_name, &pdev->dev, GFP_KERNEL,
+ wl, wlcore_nvs_cb);
+ if (ret < 0) {
+ wl1271_error("request_firmware_nowait failed for %s: %d",
+ nvs_name, ret);
+ complete_all(&wl->nvs_loading_complete);
+ }
+ } else {
+ wlcore_nvs_cb(NULL, wl);
}
return ret;
@@ -6533,9 +6544,11 @@ EXPORT_SYMBOL_GPL(wlcore_probe);
int wlcore_remove(struct platform_device *pdev)
{
+ struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl1271 *wl = platform_get_drvdata(pdev);
- wait_for_completion(&wl->nvs_loading_complete);
+ if (pdev_data->family && pdev_data->family->nvs_name)
+ wait_for_completion(&wl->nvs_loading_complete);
if (!wl->initialized)
return 0;
@@ -6572,4 +6585,3 @@ MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL12XX_NVS_NAME);
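
[Editor's note] The probe path now queues the async firmware request only when the family actually names an NVS file; otherwise it invokes the completion callback directly with a NULL blob, so the rest of initialization still runs exactly once. A sketch of that branch (kernel context; nvs_done() stands in for wlcore_nvs_cb()):

#include <linux/firmware.h>
#include <linux/module.h>

static void nvs_done(const struct firmware *fw, void *context)
{
    /* fw is NULL when no file was requested; both paths land here */
}

static int start_nvs_load(struct device *dev, const char *nvs_name,
                          void *ctx)
{
    if (nvs_name)
        return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                       nvs_name, dev, GFP_KERNEL,
                                       ctx, nvs_done);
    nvs_done(NULL, ctx);
    return 0;
}
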
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 5839acbbc782..a6e94b1a12cb 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -216,17 +216,33 @@ static struct wl1271_if_operations sdio_ops = {
};
#ifdef CONFIG_OF
+
+static const struct wilink_family_data wl127x_data = {
+ .name = "wl127x",
+ .nvs_name = "ti-connectivity/wl127x-nvs.bin",
+};
+
+static const struct wilink_family_data wl128x_data = {
+ .name = "wl128x",
+ .nvs_name = "ti-connectivity/wl128x-nvs.bin",
+};
+
+static const struct wilink_family_data wl18xx_data = {
+ .name = "wl18xx",
+ .cfg_name = "ti-connectivity/wl18xx-conf.bin",
+};
+
static const struct of_device_id wlcore_sdio_of_match_table[] = {
- { .compatible = "ti,wl1271" },
- { .compatible = "ti,wl1273" },
- { .compatible = "ti,wl1281" },
- { .compatible = "ti,wl1283" },
- { .compatible = "ti,wl1801" },
- { .compatible = "ti,wl1805" },
- { .compatible = "ti,wl1807" },
- { .compatible = "ti,wl1831" },
- { .compatible = "ti,wl1835" },
- { .compatible = "ti,wl1837" },
+ { .compatible = "ti,wl1271", .data = &wl127x_data },
+ { .compatible = "ti,wl1273", .data = &wl127x_data },
+ { .compatible = "ti,wl1281", .data = &wl128x_data },
+ { .compatible = "ti,wl1283", .data = &wl128x_data },
+ { .compatible = "ti,wl1801", .data = &wl18xx_data },
+ { .compatible = "ti,wl1805", .data = &wl18xx_data },
+ { .compatible = "ti,wl1807", .data = &wl18xx_data },
+ { .compatible = "ti,wl1831", .data = &wl18xx_data },
+ { .compatible = "ti,wl1835", .data = &wl18xx_data },
+ { .compatible = "ti,wl1837", .data = &wl18xx_data },
{ }
};
@@ -234,9 +250,13 @@ static int wlcore_probe_of(struct device *dev, int *irq,
struct wlcore_platdev_data *pdev_data)
{
struct device_node *np = dev->of_node;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(wlcore_sdio_of_match_table, np);
+ if (!of_id)
+ return -ENODEV;
- if (!np || !of_match_node(wlcore_sdio_of_match_table, np))
- return -ENODATA;
+ pdev_data->family = of_id->data;
*irq = irq_of_parse_and_map(np, 0);
if (!*irq) {
@@ -263,7 +283,7 @@ static int wlcore_probe_of(struct device *dev, int *irq,
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct wlcore_platdev_data pdev_data;
+ struct wlcore_platdev_data *pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[1];
mmc_pm_flag_t mmcflags;
@@ -275,14 +295,15 @@ static int wl1271_probe(struct sdio_func *func,
if (func->num != 0x02)
return -ENODEV;
- memset(&pdev_data, 0x00, sizeof(pdev_data));
- pdev_data.if_ops = &sdio_ops;
+ pdev_data = devm_kzalloc(&func->dev, sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ return -ENOMEM;
- glue = kzalloc(sizeof(*glue), GFP_KERNEL);
- if (!glue) {
- dev_err(&func->dev, "can't allocate glue\n");
- goto out;
- }
+ pdev_data->if_ops = &sdio_ops;
+
+ glue = devm_kzalloc(&func->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
glue->dev = &func->dev;
@@ -292,16 +313,16 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+ ret = wlcore_probe_of(&func->dev, &irq, pdev_data);
if (ret)
- goto out_free_glue;
+ goto out;
/* if sdio can keep power while host is suspended, enable wow */
mmcflags = sdio_get_host_pm_caps(func);
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- pdev_data.pwr_in_suspend = true;
+ pdev_data->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
@@ -323,7 +344,7 @@ static int wl1271_probe(struct sdio_func *func,
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device");
ret = -ENOMEM;
- goto out_free_glue;
+ goto out;
}
glue->core->dev.parent = &func->dev;
@@ -341,8 +362,8 @@ static int wl1271_probe(struct sdio_func *func,
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, &pdev_data,
- sizeof(pdev_data));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -358,9 +379,6 @@ static int wl1271_probe(struct sdio_func *func,
out_dev_put:
platform_device_put(glue->core);
-out_free_glue:
- kfree(glue);
-
out:
return ret;
}
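
[Editor's note] Switching both allocations to devm_kzalloc() is what lets the out_free_glue label disappear: device-managed memory is released automatically when probe fails or the device is unbound, so every later error path can simply return. A minimal sketch of the pattern (kernel context, illustrative names):

#include <linux/device.h>
#include <linux/slab.h>

struct glue { int dummy; };     /* illustrative */

static int example_probe(struct device *dev)
{
    struct glue *glue;

    glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
    if (!glue)
        return -ENOMEM;

    /* Any later failure just returns; the driver core frees the
     * allocation when the device is detached. */
    return 0;
}
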
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 0ed526e4c0df..f949ad2bd898 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -79,19 +79,19 @@
#define WSPI_MAX_NUM_OF_CHUNKS \
((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
-
-struct wilink_familiy_data {
- char name[8];
+static const struct wilink_family_data wl127x_data = {
+ .name = "wl127x",
+ .nvs_name = "ti-connectivity/wl127x-nvs.bin",
};
-static const struct wilink_familiy_data *wilink_data;
-
-static const struct wilink_familiy_data wl18xx_data = {
- .name = "wl18xx",
+static const struct wilink_family_data wl128x_data = {
+ .name = "wl128x",
+ .nvs_name = "ti-connectivity/wl128x-nvs.bin",
};
-static const struct wilink_familiy_data wl12xx_data = {
- .name = "wl12xx",
+static const struct wilink_family_data wl18xx_data = {
+ .name = "wl18xx",
+ .cfg_name = "ti-connectivity/wl18xx-conf.bin",
};
struct wl12xx_spi_glue {
@@ -429,10 +429,10 @@ static struct wl1271_if_operations spi_ops = {
};
static const struct of_device_id wlcore_spi_of_match_table[] = {
- { .compatible = "ti,wl1271", .data = &wl12xx_data},
- { .compatible = "ti,wl1273", .data = &wl12xx_data},
- { .compatible = "ti,wl1281", .data = &wl12xx_data},
- { .compatible = "ti,wl1283", .data = &wl12xx_data},
+ { .compatible = "ti,wl1271", .data = &wl127x_data},
+ { .compatible = "ti,wl1273", .data = &wl127x_data},
+ { .compatible = "ti,wl1281", .data = &wl128x_data},
+ { .compatible = "ti,wl1283", .data = &wl128x_data},
{ .compatible = "ti,wl1801", .data = &wl18xx_data},
{ .compatible = "ti,wl1805", .data = &wl18xx_data},
{ .compatible = "ti,wl1807", .data = &wl18xx_data},
@@ -460,9 +460,9 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
if (!of_id)
return -ENODEV;
- wilink_data = of_id->data;
- dev_info(&spi->dev, "selected chip familiy is %s\n",
- wilink_data->name);
+ pdev_data->family = of_id->data;
+ dev_info(&spi->dev, "selected chip family is %s\n",
+ pdev_data->family->name);
if (of_find_property(dt_node, "clock-xtal", NULL))
pdev_data->ref_clock_xtal = true;
@@ -479,13 +479,15 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
- struct wlcore_platdev_data pdev_data;
+ struct wlcore_platdev_data *pdev_data;
struct resource res[1];
int ret;
- memset(&pdev_data, 0x00, sizeof(pdev_data));
+ pdev_data = devm_kzalloc(&spi->dev, sizeof(*pdev_data), GFP_KERNEL);
+ if (!pdev_data)
+ return -ENOMEM;
- pdev_data.if_ops = &spi_ops;
+ pdev_data->if_ops = &spi_ops;
glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
@@ -509,7 +511,7 @@ static int wl1271_probe(struct spi_device *spi)
return PTR_ERR(glue->reg);
}
- ret = wlcore_probe_of(spi, glue, &pdev_data);
+ ret = wlcore_probe_of(spi, glue, pdev_data);
if (ret) {
dev_err(glue->dev,
"can't get device tree parameters (%d)\n", ret);
@@ -522,7 +524,7 @@ static int wl1271_probe(struct spi_device *spi)
return ret;
}
- glue->core = platform_device_alloc(wilink_data->name,
+ glue->core = platform_device_alloc(pdev_data->family->name,
PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device\n");
@@ -543,8 +545,8 @@ static int wl1271_probe(struct spi_device *spi)
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, &pdev_data,
- sizeof(pdev_data));
+ ret = platform_device_add_data(glue->core, pdev_data,
+ sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 0277ae508b8a..e840985385fc 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -35,12 +35,11 @@
#include "conf.h"
#include "ini.h"
-/*
- * wl127x and wl128x are using the same NVS file name. However, the
- * ini parameters between them are different. The driver validates
- * the correct NVS size in wl1271_boot_upload_nvs().
- */
-#define WL12XX_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
+struct wilink_family_data {
+ const char *name;
+ const char *nvs_name; /* wl12xx nvs file */
+ const char *cfg_name; /* wl18xx cfg file */
+};
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
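
[Editor's note] wilink_family_data ties the series together: each compatible string in the SDIO and SPI match tables points at one of these, and probe copies the pointer into the platform data so wlcore can pick the right NVS or conf file. A runnable userspace stand-in for that lookup, with of_match_node() reduced to a plain string search:

#include <stdio.h>
#include <string.h>

struct wilink_family_data {
    const char *name;
    const char *nvs_name;   /* wl12xx nvs file */
    const char *cfg_name;   /* wl18xx cfg file */
};

static const struct wilink_family_data wl18xx_data = {
    .name = "wl18xx",
    .cfg_name = "ti-connectivity/wl18xx-conf.bin",
};

static const struct {
    const char *compatible;
    const struct wilink_family_data *data;
} table[] = {
    { "ti,wl1835", &wl18xx_data },
    { "ti,wl1837", &wl18xx_data },
};

/* Userspace stand-in for of_match_node(): match the compatible
 * string and hand back the attached family data. */
static const struct wilink_family_data *lookup(const char *compat)
{
    size_t i;

    for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (!strcmp(table[i].compatible, compat))
            return table[i].data;
    return NULL;
}

int main(void)
{
    const struct wilink_family_data *fam = lookup("ti,wl1837");

    if (fam)
        printf("%s -> %s\n", fam->name, fam->cfg_name);
    return 0;
}
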
@@ -208,6 +207,7 @@ struct wl1271_if_operations {
struct wlcore_platdev_data {
struct wl1271_if_operations *if_ops;
+ const struct wilink_family_data *family;
bool ref_clock_xtal; /* specify whether the clock is XTAL or not */
u32 ref_clock_freq; /* in Hertz */
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 3a562683603c..b38fb2cf3364 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -292,8 +292,6 @@ struct xenvif {
#endif
struct xen_netif_ctrl_back_ring ctrl;
- struct task_struct *ctrl_task;
- wait_queue_head_t ctrl_wq;
unsigned int ctrl_irq;
/* Miscellaneous private stuff. */
@@ -359,7 +357,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data);
-int xenvif_ctrl_kthread(void *data);
+irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
@@ -412,8 +410,4 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
-#ifdef CONFIG_DEBUG_FS
-void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
-#endif
-
#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index e8c5dddc54ba..613bac057650 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -360,74 +360,6 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
-#ifdef CONFIG_DEBUG_FS
-void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
-{
- unsigned int i;
-
- switch (vif->hash.alg) {
- case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
- seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
- break;
-
- case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
- seq_puts(m, "Hash Algorithm: NONE\n");
- /* FALLTHRU */
- default:
- return;
- }
-
- if (vif->hash.flags) {
- seq_puts(m, "\nHash Flags:\n");
-
- if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
- seq_puts(m, "- IPv4\n");
- if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
- seq_puts(m, "- IPv4 + TCP\n");
- if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
- seq_puts(m, "- IPv6\n");
- if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
- seq_puts(m, "- IPv6 + TCP\n");
- }
-
- seq_puts(m, "\nHash Key:\n");
-
- for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
- unsigned int j, n;
-
- n = 8;
- if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
- n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;
-
- seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
-
- for (j = 0; j < n; j++, i++)
- seq_printf(m, "%02x ", vif->hash.key[i]);
-
- seq_puts(m, "\n");
- }
-
- if (vif->hash.size != 0) {
- seq_puts(m, "\nHash Mapping:\n");
-
- for (i = 0; i < vif->hash.size; ) {
- unsigned int j, n;
-
- n = 8;
- if (i + n >= vif->hash.size)
- n = vif->hash.size - i;
-
- seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
-
- for (j = 0; j < n; j++, i++)
- seq_printf(m, "%4u ", vif->hash.mapping[i]);
-
- seq_puts(m, "\n");
- }
- }
-}
-#endif /* CONFIG_DEBUG_FS */
-
void xenvif_init_hash(struct xenvif *vif)
{
if (xenvif_hash_cache_size == 0)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 83deeebfc2d1..fb50c6d5f6c3 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -128,15 +128,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
-{
- struct xenvif *vif = dev_id;
-
- wake_up(&vif->ctrl_wq);
-
- return IRQ_HANDLED;
-}
-
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
@@ -570,8 +561,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
struct net_device *dev = vif->dev;
void *addr;
struct xen_netif_ctrl_sring *shared;
- struct task_struct *task;
- int err = -ENOMEM;
+ int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
&ring_ref, 1, &addr);
@@ -581,11 +571,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
shared = (struct xen_netif_ctrl_sring *)addr;
BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
- init_waitqueue_head(&vif->ctrl_wq);
-
- err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
- xenvif_ctrl_interrupt,
- 0, dev->name, vif);
+ err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
if (err < 0)
goto err_unmap;
@@ -593,19 +579,13 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
xenvif_init_hash(vif);
- task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
- "%s-control", dev->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", dev->name);
- err = PTR_ERR(task);
+ err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
+ IRQF_ONESHOT, "xen-netback-ctrl", vif);
+ if (err) {
+ pr_warn("Could not setup irq handler for %s\n", dev->name);
goto err_deinit;
}
- get_task_struct(task);
- vif->ctrl_task = task;
-
- wake_up_process(vif->ctrl_task);
-
return 0;
err_deinit:
@@ -774,12 +754,6 @@ void xenvif_disconnect_data(struct xenvif *vif)
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
- if (vif->ctrl_task) {
- kthread_stop(vif->ctrl_task);
- put_task_struct(vif->ctrl_task);
- vif->ctrl_task = NULL;
- }
-
if (vif->ctrl_irq) {
xenvif_deinit_hash(vif);
unbind_from_irqhandler(vif->ctrl_irq, vif);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index edbae0b1e8f0..3d0c989384b5 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -2359,24 +2359,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
return 0;
}
-int xenvif_ctrl_kthread(void *data)
+irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
struct xenvif *vif = data;
- for (;;) {
- wait_event_interruptible(vif->ctrl_wq,
- xenvif_ctrl_work_todo(vif) ||
- kthread_should_stop());
- if (kthread_should_stop())
- break;
-
- while (xenvif_ctrl_work_todo(vif))
- xenvif_ctrl_action(vif);
+ while (xenvif_ctrl_work_todo(vif))
+ xenvif_ctrl_action(vif);
- cond_resched();
- }
-
- return 0;
+ return IRQ_HANDLED;
}
static int __init netback_init(void)
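
[Editor's note] Replacing the control-ring kthread with request_threaded_irq() keeps the work in process context but lets the IRQ core own the wakeup: with a NULL hard handler and IRQF_ONESHOT, the line stays masked while the thread function drains the ring. A sketch of the registration (kernel context):

#include <linux/interrupt.h>

/* The threaded handler does what the kthread loop used to do. */
static irqreturn_t ctrl_thread_fn(int irq, void *data)
{
    /* drain the control ring, then return */
    return IRQ_HANDLED;
}

static int ctrl_setup(unsigned int irq, void *vif)
{
    return request_threaded_irq(irq, NULL, ctrl_thread_fn,
                                IRQF_ONESHOT, "xen-netback-ctrl", vif);
}
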
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index bacf6e0c12b9..daf4c7867102 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
return count;
}
-static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
+static int xenvif_dump_open(struct inode *inode, struct file *filp)
{
int ret;
void *queue = NULL;
@@ -179,35 +179,13 @@ static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
.owner = THIS_MODULE,
- .open = xenvif_io_ring_open,
+ .open = xenvif_dump_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = xenvif_write_io_ring,
};
-static int xenvif_read_ctrl(struct seq_file *m, void *v)
-{
- struct xenvif *vif = m->private;
-
- xenvif_dump_hash_info(vif, m);
-
- return 0;
-}
-
-static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, xenvif_read_ctrl, inode->i_private);
-}
-
-static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
- .owner = THIS_MODULE,
- .open = xenvif_ctrl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void xenvif_debugfs_addif(struct xenvif *vif)
{
struct dentry *pfile;
@@ -232,17 +210,6 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
pr_warn("Creation of io_ring file returned %ld!\n",
PTR_ERR(pfile));
}
-
- if (vif->ctrl_task) {
- pfile = debugfs_create_file("ctrl",
- S_IRUSR,
- vif->xenvif_dbg_root,
- vif,
- &xenvif_dbg_ctrl_ops_fops);
- if (IS_ERR_OR_NULL(pfile))
- pr_warn("Creation of ctrl file returned %ld!\n",
- PTR_ERR(pfile));
- }
} else
netdev_warn(vif->dev,
"Creation of vif debugfs dir returned %ld!\n",
@@ -304,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev,
be->dev = dev;
dev_set_drvdata(&dev->dev, be);
+ be->state = XenbusStateInitialising;
+ err = xenbus_switch_state(dev, XenbusStateInitialising);
+ if (err)
+ goto fail;
+
sg = 1;
do {
@@ -416,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev,
be->hotplug_script = script;
- err = xenbus_switch_state(dev, XenbusStateInitWait);
- if (err)
- goto fail;
-
- be->state = XenbusStateInitWait;
/* This kicks hotplug scripts, so do it immediately. */
err = backend_create_xenvif(be);
@@ -525,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be,
/* Handle backend state transitions:
*
- * The backend state starts in InitWait and the following transitions are
+ * The backend state starts in Initialising and the following transitions are
* allowed.
*
- * InitWait -> Connected
- *
- *    ^    \         |
- *    |     \        |
- *    |      \       |
- *    |       \      |
- *    |        \     |
- *    |         \    |
- *    |          V   V
+ * Initialising -> InitWait -> Connected
+ *          \
+ *           \        ^    \         |
+ *            \       |     \        |
+ *             \      |      \       |
+ *              \     |       \      |
+ *               \    |        \     |
+ *                \   |         \    |
+ *                 V  |          V   V
+ *
- *  Closed  <-> Closing
+ *                  Closed  <-> Closing
*
* The state argument specifies the eventual state of the backend and the
* function transitions to that state via the shortest path.
@@ -548,6 +515,20 @@ static void set_backend_state(struct backend_info *be,
{
while (be->state != state) {
switch (be->state) {
+ case XenbusStateInitialising:
+ switch (state) {
+ case XenbusStateInitWait:
+ case XenbusStateConnected:
+ case XenbusStateClosing:
+ backend_switch_state(be, XenbusStateInitWait);
+ break;
+ case XenbusStateClosed:
+ backend_switch_state(be, XenbusStateClosed);
+ break;
+ default:
+ BUG();
+ }
+ break;
case XenbusStateClosed:
switch (state) {
case XenbusStateInitWait:
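
[Editor's note] set_backend_state() walks one transition at a time until the current state equals the requested one, so the new Initialising case only has to name the next hop toward each eventual target. A simplified runnable model of that loop (the transition table is abbreviated, not the full xenbus one):

#include <stdio.h>

enum state { INITIALISING, INIT_WAIT, CONNECTED, CLOSING, CLOSED };

/* One hop toward the target; the caller loops until it arrives. */
static enum state next_hop(enum state cur, enum state target)
{
    switch (cur) {
    case INITIALISING:
        return target == CLOSED ? CLOSED : INIT_WAIT;
    case INIT_WAIT:
        return target == CONNECTED ? CONNECTED : CLOSING;
    case CONNECTED:
        return CLOSING;
    case CLOSING:
        return CLOSED;
    default:
        return INIT_WAIT;
    }
}

int main(void)
{
    enum state s = INITIALISING;

    while (s != CONNECTED) {
        s = next_hop(s, CONNECTED);
        printf("-> %d\n", s);   /* -> 1 (InitWait), -> 2 (Connected) */
    }
    return 0;
}
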
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 96ccd4e943db..e17879dd5d5a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -565,6 +565,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netfront_queue *queue = NULL;
unsigned int num_queues = dev->real_num_tx_queues;
u16 queue_index;
+ struct sk_buff *nskb;
/* Drop the packet if no queues are set up */
if (num_queues < 1)
@@ -593,6 +594,20 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
+
+ /* The first req should be at least ETH_HLEN size or the packet will be
+ * dropped by netback.
+ */
+ if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto drop;
+ dev_kfree_skb_any(skb);
+ skb = nskb;
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ }
+
len = skb_headlen(skb);
spin_lock_irqsave(&queue->tx_lock, flags);
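The check above guards the grant slot layout: the first slot only covers the bytes from `offset` to the end of the page, so a head that leaves fewer than ETH_HLEN bytes in that page must be copied into a freshly allocated skb. A standalone sketch of the arithmetic, with PAGE_SIZE and ETH_HLEN assumed rather than pulled from kernel headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ETH_HLEN  14u

static bool first_slot_too_short(uintptr_t head)
{
	unsigned int offset = head & (PAGE_SIZE - 1);	/* offset_in_page() */

	return PAGE_SIZE - offset < ETH_HLEN;
}

int main(void)
{
	/* head starts 4 bytes before the end of a page: only 4 bytes fit */
	printf("%d\n", first_slot_too_short(3 * PAGE_SIZE + PAGE_SIZE - 4)); /* 1 */
	printf("%d\n", first_slot_too_short(3 * PAGE_SIZE + 128));           /* 0 */
	return 0;
}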
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 715583f69d28..4d7bbd2df5c0 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -99,8 +99,11 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
nvdimm_map->size = size;
kref_init(&nvdimm_map->kref);
- if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
+ if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
+ dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
+ &offset, size, dev_name(dev));
goto err_request_region;
+ }
if (flags)
nvdimm_map->mem = memremap(offset, size, flags);
@@ -171,6 +174,9 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
kref_get(&nvdimm_map->kref);
nvdimm_bus_unlock(dev);
+ if (!nvdimm_map)
+ return NULL;
+
if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
return NULL;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 8024a0ef86d3..0b78a8211f4a 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -52,10 +52,28 @@ struct nvdimm_drvdata {
struct nd_region_data {
int ns_count;
int ns_active;
- unsigned int flush_mask;
- void __iomem *flush_wpq[0][0];
+ unsigned int hints_shift;
+ void __iomem *flush_wpq[0];
};
+static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
+ int dimm, int hint)
+{
+ unsigned int num = 1 << ndrd->hints_shift;
+ unsigned int mask = num - 1;
+
+ return ndrd->flush_wpq[dimm * num + (hint & mask)];
+}
+
+static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
+ int hint, void __iomem *flush)
+{
+ unsigned int num = 1 << ndrd->hints_shift;
+ unsigned int mask = num - 1;
+
+ ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
+}
+
static inline struct nd_namespace_index *to_namespace_index(
struct nvdimm_drvdata *ndd, int i)
{
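The two helpers above replace the flexible two-dimensional array with a flat one, sized per DIMM at a power of two so a hint index can wrap with a cheap mask instead of a modulo. A userspace sketch of the indexing, with sizes invented for the demo:

#include <stdint.h>
#include <stdio.h>

#define HINTS_SHIFT 2			/* 4 hints per DIMM */
#define NUM_DIMMS   3

static void *table[NUM_DIMMS << HINTS_SHIFT];

static unsigned int flat_index(int dimm, int hint)
{
	unsigned int num = 1u << HINTS_SHIFT;
	unsigned int mask = num - 1;

	return dimm * num + (hint & mask);	/* hint wraps modulo num */
}

int main(void)
{
	table[flat_index(1, 0)] = (void *)(uintptr_t)0xdead;
	/* hint 4 wraps back to hint 0 of the same DIMM */
	printf("%p\n", table[flat_index(1, 4)]);
	return 0;
}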
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e8d5ba7b29af..4c0ac4abb629 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -38,7 +38,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
- for (i = 0; i < nvdimm->num_flush; i++) {
+ for (i = 0; i < (1 << ndrd->hints_shift); i++) {
struct resource *res = &nvdimm->flush_wpq[i];
unsigned long pfn = PHYS_PFN(res->start);
void __iomem *flush_page;
@@ -54,14 +54,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
if (j < i)
flush_page = (void __iomem *) ((unsigned long)
- ndrd->flush_wpq[dimm][j] & PAGE_MASK);
+ ndrd_get_flush_wpq(ndrd, dimm, j)
+ & PAGE_MASK);
else
flush_page = devm_nvdimm_ioremap(dev,
- PHYS_PFN(pfn), PAGE_SIZE);
+ PFN_PHYS(pfn), PAGE_SIZE);
if (!flush_page)
return -ENXIO;
- ndrd->flush_wpq[dimm][i] = flush_page
- + (res->start & ~PAGE_MASK);
+ ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
+ + (res->start & ~PAGE_MASK));
}
return 0;
@@ -93,7 +94,10 @@ int nd_region_activate(struct nd_region *nd_region)
return -ENOMEM;
dev_set_drvdata(dev, ndrd);
- ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
+ if (!num_flush)
+ return 0;
+
+ ndrd->hints_shift = ilog2(num_flush);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -900,8 +904,8 @@ void nvdimm_flush(struct nd_region *nd_region)
*/
wmb();
for (i = 0; i < nd_region->ndr_mappings; i++)
- if (ndrd->flush_wpq[i][0])
- writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
+ if (ndrd_get_flush_wpq(ndrd, i, 0))
+ writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
@@ -925,7 +929,7 @@ int nvdimm_has_flush(struct nd_region *nd_region)
for (i = 0; i < nd_region->ndr_mappings; i++)
/* flush hints present, flushing required */
- if (ndrd->flush_wpq[i][0])
+ if (ndrd_get_flush_wpq(ndrd, i, 0))
return 1;
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8dcf5a960951..60f7eab11865 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1693,7 +1693,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_suspend_queue(dev->queues[i]);
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
- nvme_suspend_queue(dev->queues[0]);
+ /* A device might become IO incapable very soon during
+ * probe, before the admin queue is configured. Thus,
+ * queue_count can be 0 here.
+ */
+ if (dev->queue_count)
+ nvme_suspend_queue(dev->queues[0]);
} else {
nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
@@ -2112,6 +2117,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ 0, }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index ab545fb347a0..fbdb2267e460 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -82,6 +82,8 @@ struct nvme_rdma_request {
enum nvme_rdma_queue_flags {
NVME_RDMA_Q_CONNECTED = (1 << 0),
+ NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
+ NVME_RDMA_Q_DELETING = (1 << 2),
};
struct nvme_rdma_queue {
@@ -291,6 +293,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
if (IS_ERR(req->mr)) {
ret = PTR_ERR(req->mr);
req->mr = NULL;
+ goto out;
}
req->mr->need_inval = false;
@@ -480,9 +483,14 @@ out_err:
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
+ struct nvme_rdma_device *dev;
+ struct ib_device *ibdev;
+
+ if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
+ return;
+ dev = queue->device;
+ ibdev = dev->dev;
rdma_destroy_qp(queue->cm_id);
ib_free_cq(queue->ib_cq);
@@ -533,6 +541,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
ret = -ENOMEM;
goto out_destroy_qp;
}
+ set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
return 0;
@@ -585,11 +594,13 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
goto out_destroy_cm_id;
}
+ clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
return 0;
out_destroy_cm_id:
+ nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
return ret;
}
@@ -608,7 +619,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+ if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
return;
nvme_rdma_stop_queue(queue);
nvme_rdma_free_queue(queue);
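With the DELETING flag, the first caller of nvme_rdma_stop_and_free_queue() claims the teardown atomically and later callers return early, which is what makes the path safe against concurrent deletion. A tiny sketch of the idiom, using C11 atomics in place of the kernel's test_and_set_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag deleting = ATOMIC_FLAG_INIT;

static void stop_and_free(void)
{
	if (atomic_flag_test_and_set(&deleting))
		return;			/* someone else already owns teardown */
	puts("tearing down queue");	/* stop + free happen exactly once */
}

int main(void)
{
	stop_and_free();	/* prints */
	stop_and_free();	/* silently returns */
	return 0;
}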
@@ -652,7 +663,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
return 0;
out_free_queues:
- for (; i >= 1; i--)
+ for (i--; i >= 1; i--)
nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
return ret;
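The `for (i--; ...)` change fixes the unwind bound: queue i failed to initialize and must not be freed, so cleanup starts at i - 1. A compact illustration, with init_q/free_q as made-up stand-ins:

#include <stdio.h>

#define NQ 4

static int init_q(int i)  { return i == 2 ? -1 : 0; }	/* simulate failure at 2 */
static void free_q(int i) { printf("free %d\n", i); }

int main(void)
{
	int i;

	for (i = 1; i < NQ; i++)
		if (init_q(i))
			goto out_free;
	return 0;

out_free:
	for (i--; i >= 1; i--)	/* skip queue i: it never initialized */
		free_q(i);
	return 1;
}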
@@ -761,8 +772,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
+ int i;
nvme_stop_keep_alive(&ctrl->ctrl);
+
+ for (i = 0; i < ctrl->queue_count; i++)
+ clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+
if (ctrl->queue_count > 1)
nvme_stop_queues(&ctrl->ctrl);
blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1305,58 +1321,6 @@ out_destroy_queue_ib:
return ret;
}
-/**
- * nvme_rdma_device_unplug() - Handle RDMA device unplug
- * @queue: Queue that owns the cm_id that caught the event
- *
- * DEVICE_REMOVAL event notifies us that the RDMA device is about
- * to unplug so we should take care of destroying our RDMA resources.
- * This event will be generated for each allocated cm_id.
- *
- * In our case, the RDMA resources are managed per controller and not
- * only per queue. So the way we handle this is we trigger an implicit
- * controller deletion upon the first DEVICE_REMOVAL event we see, and
- * hold the event inflight until the controller deletion is completed.
- *
- * One exception that we need to handle is the destruction of the cm_id
- * that caught the event. Since we hold the callout until the controller
- * deletion is completed, we'll deadlock if the controller deletion will
- * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources,
- * then queue the controller deletion which won't destroy this queue and
- * we destroy the cm_id implicitly by returning a non-zero rc to the callout.
- */
-static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
-{
- struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- int ret = 0;
-
- /* Own the controller deletion */
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
- return 0;
-
- dev_warn(ctrl->ctrl.device,
- "Got rdma device removal event, deleting ctrl\n");
-
- /* Get rid of reconnect work if its running */
- cancel_delayed_work_sync(&ctrl->reconnect_work);
-
- /* Disable the queue so ctrl delete won't free it */
- if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
- /* Free this queue ourselves */
- nvme_rdma_stop_queue(queue);
- nvme_rdma_destroy_queue_ib(queue);
-
- /* Return non-zero so the cm_id will destroy implicitly */
- ret = 1;
- }
-
- /* Queue controller deletion */
- queue_work(nvme_rdma_wq, &ctrl->delete_work);
- flush_work(&ctrl->delete_work);
- return ret;
-}
-
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
@@ -1398,8 +1362,8 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
nvme_rdma_error_recovery(queue->ctrl);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
- /* return 1 means implicit CM ID destroy */
- return nvme_rdma_device_unplug(queue);
+ /* device removal is handled via the ib_client API */
+ break;
default:
dev_err(queue->ctrl->ctrl.device,
"Unexpected RDMA CM event (%d)\n", ev->event);
@@ -1700,15 +1664,19 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- int ret;
+ int ret = 0;
+ /*
+ * Keep a reference until all work is flushed since
+ * __nvme_rdma_del_ctrl can free the ctrl mem
+ */
+ if (!kref_get_unless_zero(&ctrl->ctrl.kref))
+ return -EBUSY;
ret = __nvme_rdma_del_ctrl(ctrl);
- if (ret)
- return ret;
-
- flush_work(&ctrl->delete_work);
-
- return 0;
+ if (!ret)
+ flush_work(&ctrl->delete_work);
+ nvme_put_ctrl(&ctrl->ctrl);
+ return ret;
}
static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
@@ -2005,27 +1973,57 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
.create_ctrl = nvme_rdma_create_ctrl,
};
+static void nvme_rdma_add_one(struct ib_device *ib_device)
+{
+}
+
+static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+ struct nvme_rdma_ctrl *ctrl;
+
+ /* Delete all controllers using this device */
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+ if (ctrl->device->dev != ib_device)
+ continue;
+ dev_info(ctrl->ctrl.device,
+ "Removing ctrl: NQN \"%s\", addr %pISp\n",
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ __nvme_rdma_del_ctrl(ctrl);
+ }
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ flush_workqueue(nvme_rdma_wq);
+}
+
+static struct ib_client nvme_rdma_ib_client = {
+ .name = "nvme_rdma",
+ .add = nvme_rdma_add_one,
+ .remove = nvme_rdma_remove_one
+};
+
static int __init nvme_rdma_init_module(void)
{
+ int ret;
+
nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
if (!nvme_rdma_wq)
return -ENOMEM;
+ ret = ib_register_client(&nvme_rdma_ib_client);
+ if (ret) {
+ destroy_workqueue(nvme_rdma_wq);
+ return ret;
+ }
+
nvmf_register_transport(&nvme_rdma_transport);
return 0;
}
static void __exit nvme_rdma_cleanup_module(void)
{
- struct nvme_rdma_ctrl *ctrl;
-
nvmf_unregister_transport(&nvme_rdma_transport);
-
- mutex_lock(&nvme_rdma_ctrl_mutex);
- list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
- __nvme_rdma_del_ctrl(ctrl);
- mutex_unlock(&nvme_rdma_ctrl_mutex);
-
+ ib_unregister_client(&nvme_rdma_ib_client);
destroy_workqueue(nvme_rdma_wq);
}
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index d1ef7acf6930..f9357e09e9b3 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,6 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
+ pci_bridge_d3_device_removed(dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
@@ -96,8 +97,6 @@ static void pci_remove_bus_device(struct pci_dev *dev)
dev->subordinate = NULL;
}
- pci_bridge_d3_device_removed(dev);
-
pci_destroy_dev(dev);
}
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 489ea1098c96..69b5e811ea2b 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
/************************ runtime PM support ***************************/
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_suspend(struct device *dev);
static int pcmcia_dev_resume(struct device *dev);
static int runtime_suspend(struct device *dev)
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev)
int rc;
device_lock(dev);
- rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+ rc = pcmcia_dev_suspend(dev);
device_unlock(dev);
return rc;
}
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev);
/* PM support, also needed for reset */
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
struct pcmcia_driver *p_drv = NULL;
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = {
.remove_dev = &pcmcia_bus_remove_socket,
};
+static const struct dev_pm_ops pcmcia_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
+};
struct bus_type pcmcia_bus_type = {
.name = "pcmcia",
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = {
.dev_groups = pcmcia_dev_groups,
.probe = pcmcia_device_probe,
.remove = pcmcia_device_remove,
- .suspend = pcmcia_dev_suspend,
- .resume = pcmcia_dev_resume,
+ .pm = &pcmcia_bus_pm_ops,
};
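The conversion above moves the bus from the legacy .suspend/.resume function pointers (and their pm_message_t argument) to a dev_pm_ops table. A reduced plain-C sketch of the shape of that change, hand-rolling the ops struct instead of using the SET_SYSTEM_SLEEP_PM_OPS macro:

#include <stdio.h>

struct device;

struct dev_pm_ops_demo {
	int (*suspend)(struct device *dev);	/* no pm_message_t anymore */
	int (*resume)(struct device *dev);
};

static int demo_suspend(struct device *dev) { (void)dev; puts("suspend"); return 0; }
static int demo_resume(struct device *dev)  { (void)dev; puts("resume");  return 0; }

static const struct dev_pm_ops_demo demo_pm_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};

struct bus_type_demo {
	const char *name;
	const struct dev_pm_ops_demo *pm;	/* replaces .suspend/.resume */
};

static struct bus_type_demo demo_bus = {
	.name = "demo",
	.pm   = &demo_pm_ops,
};

int main(void)
{
	demo_bus.pm->suspend(NULL);
	demo_bus.pm->resume(NULL);
	return 0;
}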
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 483f919e0d2e..91b5f5724cba 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
}
#endif
-void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
{
- struct pcmcia_low_level *ops = dev->platform_data;
/*
* We have at least one socket, so set MECR:CIT
* (Card Is There)
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
goto err1;
}
- pxa2xx_configure_sockets(&dev->dev);
+ pxa2xx_configure_sockets(&dev->dev, ops);
dev_set_drvdata(&dev->dev, sinfo);
return 0;
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
static int pxa2xx_drv_pcmcia_resume(struct device *dev)
{
- pxa2xx_configure_sockets(dev);
+ struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data;
+
+ pxa2xx_configure_sockets(dev, ops);
return 0;
}
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index b609b45469ed..e58c7a415418 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,4 +1,4 @@
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
-void pxa2xx_configure_sockets(struct device *dev);
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops);
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
index 12f0dd091477..2f490930430d 100644
--- a/drivers/pcmcia/sa1111_badge4.c
+++ b/drivers/pcmcia/sa1111_badge4.c
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
int pcmcia_badge4_init(struct sa1111_dev *dev)
{
- int ret = -ENODEV;
-
- if (machine_is_badge4()) {
- printk(KERN_INFO
- "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
- __func__,
- badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
-
- sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
- ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ printk(KERN_INFO
+ "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
+ __func__,
+ badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
+
+ sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
+ return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
static int __init pcmv_setup(char *s)
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index a1531feb8460..3d95dffcff7a 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -18,6 +18,7 @@
#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
+#include <asm/mach-types.h>
#include <asm/irq.h>
#include "sa1111_generic.h"
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev)
sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
+ ret = -ENODEV;
#ifdef CONFIG_SA1100_BADGE4
- pcmcia_badge4_init(dev);
+ if (machine_is_badge4())
+ ret = pcmcia_badge4_init(dev);
#endif
#ifdef CONFIG_SA1100_JORNADA720
- pcmcia_jornada720_init(dev);
+ if (machine_is_jornada720())
+ ret = pcmcia_jornada720_init(dev);
#endif
#ifdef CONFIG_ARCH_LUBBOCK
- pcmcia_lubbock_init(dev);
+ if (machine_is_lubbock())
+ ret = pcmcia_lubbock_init(dev);
#endif
#ifdef CONFIG_ASSABET_NEPONSET
- pcmcia_neponset_init(dev);
+ if (machine_is_assabet())
+ ret = pcmcia_neponset_init(dev);
#endif
- return 0;
+
+ if (ret) {
+ release_mem_region(dev->res.start, 512);
+ sa1111_disable_device(dev);
+ }
+
+ return ret;
}
static int pcmcia_remove(struct sa1111_dev *dev)
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
index c2c30580c83f..480a3ede27c8 100644
--- a/drivers/pcmcia/sa1111_jornada720.c
+++ b/drivers/pcmcia/sa1111_jornada720.c
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
int pcmcia_jornada720_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
+ unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
- if (machine_is_jornada720()) {
- unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+ /* Fixme: why messing around with SA11x0's GPIO1? */
+ GRER |= 0x00000002;
- GRER |= 0x00000002;
+ /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+ sa1111_set_io_dir(sadev, pin, 0, 0);
+ sa1111_set_io(sadev, pin, 0);
+ sa1111_set_sleep_io(sadev, pin, 0);
- /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
- sa1111_set_io_dir(sadev, pin, 0, 0);
- sa1111_set_io(sadev, pin, 0);
- sa1111_set_sleep_io(sadev, pin, 0);
-
- sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
- ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c
index c5caf5790451..e741f499c875 100644
--- a/drivers/pcmcia/sa1111_lubbock.c
+++ b/drivers/pcmcia/sa1111_lubbock.c
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = {
int pcmcia_lubbock_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
-
- if (machine_is_lubbock()) {
- /*
- * Set GPIO_A<3:0> to be outputs for the MAX1600,
- * and switch to standby mode.
- */
- sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
- sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-
- /* Set CF Socket 1 power to standby mode. */
- lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
+ /*
+ * Set GPIO_A<3:0> to be outputs for the MAX1600,
+ * and switch to standby mode.
+ */
+ sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+ sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
- pxa2xx_configure_sockets(&sadev->dev);
- ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
- pxa2xx_drv_pcmcia_add_one);
- }
+ /* Set CF Socket 1 power to standby mode. */
+ lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
- return ret;
+ pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+ pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
+ pxa2xx_drv_pcmcia_add_one);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c
index 1d78739c4c07..019c395eb4bf 100644
--- a/drivers/pcmcia/sa1111_neponset.c
+++ b/drivers/pcmcia/sa1111_neponset.c
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
int pcmcia_neponset_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
-
- if (machine_is_assabet()) {
- /*
- * Set GPIO_A<3:0> to be outputs for the MAX1600,
- * and switch to standby mode.
- */
- sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
- sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
- ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ /*
+ * Set GPIO_A<3:0> to be outputs for the MAX1600,
+ * and switch to standby mode.
+ */
+ sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+ sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 9f6ec87b9f9e..48140ac73ed6 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -144,19 +144,19 @@ static int
sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
{
struct soc_pcmcia_timing timing;
- unsigned int clock = clk_get_rate(skt->clk);
+ unsigned int clock = clk_get_rate(skt->clk) / 1000;
unsigned long mecr = MECR;
char *p = buf;
soc_common_pcmcia_get_timing(skt, &timing);
- p+=sprintf(p, "I/O : %u (%u)\n", timing.io,
+ p+=sprintf(p, "I/O : %uns (%uns)\n", timing.io,
sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
- p+=sprintf(p, "attribute: %u (%u)\n", timing.attr,
+ p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
- p+=sprintf(p, "common : %u (%u)\n", timing.mem,
+ p+=sprintf(p, "common : %uns (%uns)\n", timing.mem,
sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
return p - buf;
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index eed5e9c05353..d5ca760c4eb2 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -235,7 +235,7 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
if (skt->cs_state.flags & SS_IOCARD)
- stat |= state.bvd1 ? SS_STSCHG : 0;
+ stat |= state.bvd1 ? 0 : SS_STSCHG;
else {
if (state.bvd1 == 0)
stat |= SS_BATDEAD;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 2e481b9e8ea5..86280b7e41f3 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -263,6 +263,7 @@ no_sysfs:
no_device:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
+ ida_simple_remove(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
no_memory:
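The added ida_simple_remove() plugs a leak in the unwind ladder: each error label must undo exactly what succeeded before the failure, in reverse order. A generic sketch of the goto-ladder style, with acquire_*/release_* as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

static bool acquire_a(void) { return true; }
static void release_a(void) { puts("release a"); }
static bool acquire_b(void) { return true; }
static void release_b(void) { puts("release b"); }
static bool acquire_c(void) { return false; }	/* simulate failure */

static int setup(void)
{
	if (!acquire_a())
		goto no_a;
	if (!acquire_b())
		goto no_b;
	if (!acquire_c())
		goto no_c;
	return 0;

	/* unwind in reverse acquisition order */
no_c:
	release_b();
no_b:
	release_a();
no_a:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}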
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 3fa17ac8df54..cebc296463ad 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -2247,17 +2247,30 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
{
struct rio_channel *ch;
unsigned int i;
+ LIST_HEAD(list);
riocm_debug(EXIT, ".");
+ /*
+ * If there are any channels left in the connected state, send
+ * a close notification to the connection partner.
+ * First build a list of channels that require a closing
+ * notification, because riocm_send_close() should be
+ * called outside of spinlock-protected code.
+ */
spin_lock_bh(&idr_lock);
idr_for_each_entry(&ch_idr, ch, i) {
- riocm_debug(EXIT, "close ch %d", ch->id);
- if (ch->state == RIO_CM_CONNECTED)
- riocm_send_close(ch);
+ if (ch->state == RIO_CM_CONNECTED) {
+ riocm_debug(EXIT, "close ch %d", ch->id);
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
}
spin_unlock_bh(&idr_lock);
+ list_for_each_entry(ch, &list, ch_node)
+ riocm_send_close(ch);
+
return NOTIFY_DONE;
}
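The rework above is the usual split of "collect under the lock, act after dropping it": channels are unhooked onto a private list inside the spinlock, and the potentially sleeping riocm_send_close() runs only afterwards. A userspace sketch with a pthread mutex standing in for the spinlock and send_close() for riocm_send_close():

#include <pthread.h>
#include <stdio.h>

struct ch { int id; int connected; struct ch *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct ch a = { 1, 1, NULL }, b = { 2, 0, &a };
static struct ch *channels = &b;

static void send_close(struct ch *c) { printf("close %d\n", c->id); }

int main(void)
{
	struct ch *c, **p, *todo = NULL;

	pthread_mutex_lock(&lock);
	for (p = &channels; (c = *p); ) {
		if (c->connected) {
			*p = c->next;		/* unhook under the lock */
			c->next = todo;
			todo = c;
		} else {
			p = &c->next;
		}
	}
	pthread_mutex_unlock(&lock);

	for (c = todo; c; c = c->next)
		send_close(c);		/* may sleep: done without the lock */
	return 0;
}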
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index bf40063de202..6d4b68c483f3 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -999,6 +999,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
__u16, __u16,
enum qeth_prot_versions);
int qeth_set_features(struct net_device *, netdev_features_t);
+int qeth_recover_features(struct net_device *);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
/* exports for OSN */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7dba6c8537a1..20cf29613043 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3619,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
int e;
e = 0;
- while (buffer->element[e].addr) {
+ while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
+ buffer->element[e].addr) {
unsigned long phys_aob_addr;
phys_aob_addr = (unsigned long) buffer->element[e].addr;
@@ -6131,6 +6132,35 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
return rc;
}
+/* try to restore device features on a device after recovery */
+int qeth_recover_features(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+ netdev_features_t recover = dev->features;
+
+ if (recover & NETIF_F_IP_CSUM) {
+ if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM))
+ recover ^= NETIF_F_IP_CSUM;
+ }
+ if (recover & NETIF_F_RXCSUM) {
+ if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM))
+ recover ^= NETIF_F_RXCSUM;
+ }
+ if (recover & NETIF_F_TSO) {
+ if (qeth_set_ipa_tso(card, 1))
+ recover ^= NETIF_F_TSO;
+ }
+
+ if (recover == dev->features)
+ return 0;
+
+ dev_warn(&card->gdev->dev,
+ "Device recovery failed to restore all offload features\n");
+ dev->features = recover;
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(qeth_recover_features);
+
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
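qeth_recover_features() tracks failures by toggling bits out of a working copy of the feature mask: each bit is known to be set when the XOR runs, so the XOR clears exactly that feature, and a final compare reveals whether anything was lost. A self-contained sketch with invented feature values:

#include <stdio.h>

#define F_IP_CSUM (1u << 0)
#define F_RXCSUM  (1u << 1)
#define F_TSO     (1u << 2)

static int hw_enable(unsigned int f) { return f == F_TSO ? -1 : 0; }

int main(void)
{
	unsigned int features = F_IP_CSUM | F_RXCSUM | F_TSO;
	unsigned int recover = features;

	if ((recover & F_IP_CSUM) && hw_enable(F_IP_CSUM))
		recover ^= F_IP_CSUM;	/* bit was set, XOR clears it */
	if ((recover & F_RXCSUM) && hw_enable(F_RXCSUM))
		recover ^= F_RXCSUM;
	if ((recover & F_TSO) && hw_enable(F_TSO))
		recover ^= F_TSO;

	if (recover != features)
		printf("lost features: %#x\n", features & ~recover);	/* 0x4 */
	return 0;
}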
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7bc20c5188bc..bb27058fa9f0 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1124,14 +1124,11 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_RXCSUM;
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
- /* Turn on SG per default */
- card->dev->features |= NETIF_F_SG;
}
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
- card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
netif_carrier_off(card->dev);
@@ -1246,6 +1243,9 @@ contin:
}
/* this also sets saved unicast addresses */
qeth_l2_set_rx_mode(card->dev);
+ rtnl_lock();
+ qeth_recover_features(card->dev);
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 72934666fedf..272d9e7419be 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -257,6 +257,11 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
if (addr->in_progress)
return -EINPROGRESS;
+ if (!qeth_card_hw_is_reachable(card)) {
+ addr->disp_flag = QETH_DISP_ADDR_DELETE;
+ return 0;
+ }
+
rc = qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
@@ -296,6 +301,11 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
hash_add(card->ip_htable, &addr->hnode,
qeth_l3_ipaddr_hash(addr));
+ if (!qeth_card_hw_is_reachable(card)) {
+ addr->disp_flag = QETH_DISP_ADDR_ADD;
+ return 0;
+ }
+
/* qeth_l3_register_addr_entry can go to sleep
* if we add an IPV4 addr. It is caused by the fact
* that the SETIP ipa cmd starts ARP stuff for IPV4 addr.
@@ -390,12 +400,16 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
int i;
int rc;
- QETH_CARD_TEXT(card, 4, "recoverip");
+ QETH_CARD_TEXT(card, 4, "recovrip");
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
- if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+ if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
+ qeth_l3_deregister_addr_entry(card, addr);
+ hash_del(&addr->hnode);
+ kfree(addr);
+ } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
spin_unlock_bh(&card->ip_lock);
@@ -407,10 +421,8 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
if (!rc) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- if (addr->ref_counter < 1) {
+ if (addr->ref_counter < 1)
qeth_l3_delete_ip(card, addr);
- kfree(addr);
- }
} else {
hash_del(&addr->hnode);
kfree(addr);
@@ -689,7 +701,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
spin_lock_bh(&card->ip_lock);
- if (!qeth_l3_ip_from_hash(card, ipaddr))
+ if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
qeth_l3_add_ip(card, ipaddr);
@@ -757,7 +769,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
spin_lock_bh(&card->ip_lock);
- if (!qeth_l3_ip_from_hash(card, ipaddr))
+ if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
qeth_l3_add_ip(card, ipaddr);
@@ -3108,7 +3120,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->vlan_features = NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_TSO;
- card->dev->features = NETIF_F_SG;
}
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3136,7 +3147,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_keep_dst(card->dev);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
- card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
@@ -3269,6 +3279,7 @@ contin:
else
dev_open(card->dev);
qeth_l3_set_multicast_list(card->dev);
+ qeth_recover_features(card->dev);
rtnl_unlock();
}
qeth_trace_features(card);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 65645b11fc19..0e00a5ce0f00 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -297,7 +297,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
addr->u.a6.pfxlen = 0;
addr->type = QETH_IP_TYPE_NORMAL;
+ spin_lock_bh(&card->ip_lock);
qeth_l3_delete_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
kfree(addr);
}
@@ -329,7 +331,10 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
addr->type = QETH_IP_TYPE_NORMAL;
} else
return -ENOMEM;
+
+ spin_lock_bh(&card->ip_lock);
qeth_l3_add_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
kfree(addr);
return count;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index e4ba2d2616cd..7c0d7af0d3b7 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -84,6 +84,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
.name = DRV_MODULE_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .lro = false,
.add = t4_uld_add,
.rx_handler = t4_uld_rx_handler,
.state_change = t4_uld_state_change,
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ba9af4a2bd2a..ec6381e57eb7 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -486,6 +486,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
+ shost->use_blk_mq = scsi_use_blk_mq;
+
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1f36aca44394..1deb6adc411f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1160,7 +1160,6 @@ bool scsi_use_blk_mq = true;
bool scsi_use_blk_mq = false;
#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
-EXPORT_SYMBOL_GPL(scsi_use_blk_mq);
static int __init init_scsi(void)
{
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 57a4b9973320..85c8a51bc563 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -29,6 +29,7 @@ extern int scsi_init_hosts(void);
extern void scsi_exit_hosts(void);
/* scsi.c */
+extern bool scsi_use_blk_mq;
extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
#ifdef CONFIG_SCSI_LOGGING
diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO
index a10d4f82b954..13224694a8ae 100644
--- a/drivers/staging/media/cec/TODO
+++ b/drivers/staging/media/cec/TODO
@@ -12,6 +12,7 @@ Hopefully this will happen later in 2016.
Other TODOs:
+- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
- Add a flag to inhibit passing CEC RC messages to the rc subsystem.
Applications should be able to choose this when calling S_LOG_ADDRS.
- If the reply field of cec_msg is set then when the reply arrives it
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index b2393bbacb26..946986f3ac0d 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -124,10 +124,10 @@ static void cec_queue_event(struct cec_adapter *adap,
u64 ts = ktime_get_ns();
struct cec_fh *fh;
- mutex_lock(&adap->devnode.fhs_lock);
+ mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, ev, ts);
- mutex_unlock(&adap->devnode.fhs_lock);
+ mutex_unlock(&adap->devnode.lock);
}
/*
@@ -191,12 +191,12 @@ static void cec_queue_msg_monitor(struct cec_adapter *adap,
u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
CEC_MODE_MONITOR_ALL;
- mutex_lock(&adap->devnode.fhs_lock);
+ mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower >= monitor_mode)
cec_queue_msg_fh(fh, msg);
}
- mutex_unlock(&adap->devnode.fhs_lock);
+ mutex_unlock(&adap->devnode.lock);
}
/*
@@ -207,12 +207,12 @@ static void cec_queue_msg_followers(struct cec_adapter *adap,
{
struct cec_fh *fh;
- mutex_lock(&adap->devnode.fhs_lock);
+ mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower == CEC_MODE_FOLLOWER)
cec_queue_msg_fh(fh, msg);
}
- mutex_unlock(&adap->devnode.fhs_lock);
+ mutex_unlock(&adap->devnode.lock);
}
/* Notify userspace of an adapter state change. */
@@ -851,6 +851,9 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
if (!valid_la || msg->len <= 1)
return;
+ if (adap->log_addrs.log_addr_mask == 0)
+ return;
+
/*
* Process the message on the protocol level. If is_reply is true,
* then cec_receive_notify() won't pass on the reply to the listener(s)
@@ -1047,11 +1050,17 @@ static int cec_config_thread_func(void *arg)
dprintk(1, "could not claim LA %d\n", i);
}
+ if (adap->log_addrs.log_addr_mask == 0 &&
+ !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
+ goto unconfigure;
+
configured:
if (adap->log_addrs.log_addr_mask == 0) {
/* Fall back to unregistered */
las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
las->log_addr_mask = 1 << las->log_addr[0];
+ for (i = 1; i < las->num_log_addrs; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
}
adap->is_configured = true;
adap->is_configuring = false;
@@ -1070,6 +1079,8 @@ configured:
cec_report_features(adap, i);
cec_report_phys_addr(adap, i);
}
+ for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
mutex_lock(&adap->lock);
adap->kthread_config = NULL;
mutex_unlock(&adap->lock);
@@ -1398,7 +1409,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
u8 init_laddr = cec_msg_initiator(msg);
u8 devtype = cec_log_addr2dev(adap, dest_laddr);
int la_idx = cec_log_addr2idx(adap, dest_laddr);
- bool is_directed = la_idx >= 0;
bool from_unregistered = init_laddr == 0xf;
struct cec_msg tx_cec_msg = { };
@@ -1560,7 +1570,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* Unprocessed messages are aborted if userspace isn't doing
* any processing either.
*/
- if (is_directed && !is_reply && !adap->follower_cnt &&
+ if (!is_broadcast && !is_reply && !adap->follower_cnt &&
!adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
return cec_feature_abort(adap, msg);
break;
diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/staging/media/cec/cec-api.c
index 7be7615a0fdf..e274e2f22398 100644
--- a/drivers/staging/media/cec/cec-api.c
+++ b/drivers/staging/media/cec/cec-api.c
@@ -162,7 +162,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return -ENOTTY;
if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
return -EFAULT;
- log_addrs.flags = 0;
+ log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
mutex_lock(&adap->lock);
if (!adap->is_configuring &&
(!log_addrs.num_log_addrs || !adap->is_configured) &&
@@ -435,7 +435,7 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
void __user *parg = (void __user *)arg;
if (!devnode->registered)
- return -EIO;
+ return -ENODEV;
switch (cmd) {
case CEC_ADAP_G_CAPS:
@@ -508,14 +508,14 @@ static int cec_open(struct inode *inode, struct file *filp)
filp->private_data = fh;
- mutex_lock(&devnode->fhs_lock);
+ mutex_lock(&devnode->lock);
/* Queue up initial state events */
ev_state.state_change.phys_addr = adap->phys_addr;
ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
cec_queue_event_fh(fh, &ev_state, 0);
list_add(&fh->list, &devnode->fhs);
- mutex_unlock(&devnode->fhs_lock);
+ mutex_unlock(&devnode->lock);
return 0;
}
@@ -540,9 +540,9 @@ static int cec_release(struct inode *inode, struct file *filp)
cec_monitor_all_cnt_dec(adap);
mutex_unlock(&adap->lock);
- mutex_lock(&devnode->fhs_lock);
+ mutex_lock(&devnode->lock);
list_del(&fh->list);
- mutex_unlock(&devnode->fhs_lock);
+ mutex_unlock(&devnode->lock);
/* Unhook pending transmits from this filehandle. */
mutex_lock(&adap->lock);
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/staging/media/cec/cec-core.c
index 112a5fae12f5..3b1e4d2b190d 100644
--- a/drivers/staging/media/cec/cec-core.c
+++ b/drivers/staging/media/cec/cec-core.c
@@ -51,31 +51,29 @@ int cec_get_device(struct cec_devnode *devnode)
{
/*
* Check if the cec device is available. This needs to be done with
- * the cec_devnode_lock held to prevent an open/unregister race:
+ * the devnode->lock held to prevent an open/unregister race:
* without the lock, the device could be unregistered and freed between
* the devnode->registered check and get_device() calls, leading to
* a crash.
*/
- mutex_lock(&cec_devnode_lock);
+ mutex_lock(&devnode->lock);
/*
* return ENXIO if the cec device has been removed
* already or if it is not registered anymore.
*/
if (!devnode->registered) {
- mutex_unlock(&cec_devnode_lock);
+ mutex_unlock(&devnode->lock);
return -ENXIO;
}
/* and increase the device refcount */
get_device(&devnode->dev);
- mutex_unlock(&cec_devnode_lock);
+ mutex_unlock(&devnode->lock);
return 0;
}
void cec_put_device(struct cec_devnode *devnode)
{
- mutex_lock(&cec_devnode_lock);
put_device(&devnode->dev);
- mutex_unlock(&cec_devnode_lock);
}
/* Called when the last user of the cec device exits. */
@@ -84,11 +82,10 @@ static void cec_devnode_release(struct device *cd)
struct cec_devnode *devnode = to_cec_devnode(cd);
mutex_lock(&cec_devnode_lock);
-
/* Mark device node number as free */
clear_bit(devnode->minor, cec_devnode_nums);
-
mutex_unlock(&cec_devnode_lock);
+
cec_delete_adapter(to_cec_adapter(devnode));
}
@@ -117,7 +114,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
/* Initialization */
INIT_LIST_HEAD(&devnode->fhs);
- mutex_init(&devnode->fhs_lock);
+ mutex_init(&devnode->lock);
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
@@ -160,7 +157,9 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
cdev_del:
cdev_del(&devnode->cdev);
clr_bit:
+ mutex_lock(&cec_devnode_lock);
clear_bit(devnode->minor, cec_devnode_nums);
+ mutex_unlock(&cec_devnode_lock);
return ret;
}
@@ -177,17 +176,21 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
{
struct cec_fh *fh;
+ mutex_lock(&devnode->lock);
+
/* Check if devnode was never registered or already unregistered */
- if (!devnode->registered || devnode->unregistered)
+ if (!devnode->registered || devnode->unregistered) {
+ mutex_unlock(&devnode->lock);
return;
+ }
- mutex_lock(&devnode->fhs_lock);
list_for_each_entry(fh, &devnode->fhs, list)
wake_up_interruptible(&fh->wait);
- mutex_unlock(&devnode->fhs_lock);
devnode->registered = false;
devnode->unregistered = true;
+ mutex_unlock(&devnode->lock);
+
device_del(&devnode->dev);
cdev_del(&devnode->cdev);
put_device(&devnode->dev);
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c
index 94f8590492dc..ed8bd95ad6d0 100644
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ b/drivers/staging/media/pulse8-cec/pulse8-cec.c
@@ -114,14 +114,11 @@ static void pulse8_irq_work_handler(struct work_struct *work)
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
0, 0, 0, 0);
break;
- case MSGCODE_TRANSMIT_FAILED_LINE:
- cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ARB_LOST,
- 1, 0, 0, 0);
- break;
case MSGCODE_TRANSMIT_FAILED_ACK:
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
0, 1, 0, 0);
break;
+ case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
@@ -170,6 +167,9 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
schedule_work(&pulse8->work);
break;
+ case MSGCODE_HIGH_ERROR:
+ case MSGCODE_LOW_ERROR:
+ case MSGCODE_RECEIVE_FAILED:
case MSGCODE_TIMEOUT_ERROR:
break;
case MSGCODE_COMMAND_ACCEPTED:
@@ -388,7 +388,7 @@ static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
int err;
cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
- cmd[1] = 3;
+ cmd[1] = signal_free_time;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 27dd11aff934..ad26b9372f10 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -652,6 +652,9 @@ static struct iscsit_transport cxgbit_transport = {
static struct cxgb4_uld_info cxgbit_uld_info = {
.name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .lro = true,
.add = cxgbit_uld_add,
.state_change = cxgbit_uld_state_change,
.lro_rx_handler = cxgbit_uld_lro_rx_handler,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 15ce4ab11688..a2d90aca779f 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -240,8 +240,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
memcpy(&endpoint->desc, d, n);
INIT_LIST_HEAD(&endpoint->urb_list);
- /* Fix up bInterval values outside the legal range. Use 32 ms if no
- * proper value can be guessed. */
+ /*
+ * Fix up bInterval values outside the legal range.
+ * Use 10 or 8 ms if no proper value can be guessed.
+ */
i = 0; /* i = min, j = max, n = default */
j = 255;
if (usb_endpoint_xfer_int(d)) {
@@ -250,13 +252,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
- /* Many device manufacturers are using full-speed
+ /*
+ * Many device manufacturers are using full-speed
* bInterval values in high-speed interrupt endpoint
- * descriptors. Try to fix those and fall back to a
- * 32 ms default value otherwise. */
+ * descriptors. Try to fix those and fall back to an
+ * 8-ms default value otherwise.
+ */
n = fls(d->bInterval*8);
if (n == 0)
- n = 9; /* 32 ms = 2^(9-1) uframes */
+ n = 7; /* 8 ms = 2^(7-1) uframes */
j = 16;
/*
@@ -271,10 +275,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
}
break;
default: /* USB_SPEED_FULL or _LOW */
- /* For low-speed, 10 ms is the official minimum.
+ /*
+ * For low-speed, 10 ms is the official minimum.
* But some "overclocked" devices might want faster
- * polling so we'll allow it. */
- n = 32;
+ * polling so we'll allow it.
+ */
+ n = 10;
break;
}
} else if (usb_endpoint_xfer_isoc(d)) {
@@ -282,10 +288,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
j = 16;
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_HIGH:
- n = 9; /* 32 ms = 2^(9-1) uframes */
+ n = 7; /* 8 ms = 2^(7-1) uframes */
break;
default: /* USB_SPEED_FULL */
- n = 6; /* 32 ms = 2^(6-1) frames */
+ n = 4; /* 8 ms = 2^(4-1) frames */
break;
}
}
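The new defaults follow from the bInterval encoding: for high-speed interrupt and isochronous endpoints bInterval is an exponent, so the period is 2^(bInterval-1) microframes of 125 us each. n = 7 therefore gives 2^6 = 64 microframes = 8 ms, where the old n = 9 gave 32 ms. A two-line check of the arithmetic:

#include <stdio.h>

static unsigned int hs_period_us(unsigned int bInterval)
{
	return (1u << (bInterval - 1)) * 125;	/* 125 us per microframe */
}

int main(void)
{
	printf("n=7 -> %u us\n", hs_period_us(7));	/* 8000 us = 8 ms */
	printf("n=9 -> %u us\n", hs_period_us(9));	/* 32000 us = 32 ms */
	return 0;
}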
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 886526b5fcdd..73cfa13fc0dc 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -87,7 +87,7 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
depends on HAS_IOMEM
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on (ARCH_OMAP2PLUS || COMPILE_TEST) && !BLACKFIN
depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
config USB_MUSB_OMAP2PLUS
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index a204782ae530..e98b6e57b703 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
/* Infineon Flashloader driver */
#define FLASHLOADER_IDS() \
{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
- { USB_DEVICE(0x8087, 0x0716) }
+ { USB_DEVICE(0x8087, 0x0716) }, \
+ { USB_DEVICE(0x8087, 0x0801) }
DEVICE(flashloader, FLASHLOADER_IDS);
/* Google Serial USB SubClass */