-rw-r--r--  Documentation/devicetree/bindings/net/nixge.txt | 60
-rw-r--r--  Documentation/networking/device_drivers/intel/e100.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/e1000.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/e1000e.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/fm10k.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/i40e.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/iavf.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/ice.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/igb.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/igbvf.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/ixgb.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/ixgbe.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/intel/ixgbevf.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/stmicro/stmmac.txt | 2
-rw-r--r--  arch/m68k/coldfire/m5272.c | 2
-rw-r--r--  arch/mips/ar7/platform.c | 4
-rw-r--r--  arch/mips/bcm47xx/setup.c | 2
-rw-r--r--  drivers/bcma/bcma_private.h | 9
-rw-r--r--  drivers/bcma/driver_gpio.c | 2
-rw-r--r--  drivers/bcma/host_pci.c | 2
-rw-r--r--  drivers/bcma/host_soc.c | 4
-rw-r--r--  drivers/bcma/main.c | 45
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 102
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 254
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 22
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 25
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 450
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 110
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 33
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 17
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 178
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 195
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 66
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 34
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 76
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.h | 25
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 1032
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 109
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 70
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 703
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 258
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 37
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 33
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 201
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 417
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 154
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.c | 22
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_port.h | 4
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 44
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 572
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 46
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 79
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 3
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 18
-rw-r--r--  drivers/net/netdevsim/netdev.c | 23
-rw-r--r--  drivers/net/phy/dp83867.c | 47
-rw-r--r--  drivers/net/phy/dp83tc811.c | 15
-rw-r--r--  drivers/net/phy/fixed_phy.c | 82
-rw-r--r--  drivers/net/phy/marvell10g.c | 38
-rw-r--r--  drivers/net/phy/phy-c45.c | 21
-rw-r--r--  drivers/net/phy/phy-core.c | 116
-rw-r--r--  drivers/net/phy/phy.c | 11
-rw-r--r--  drivers/net/usb/lan78xx.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/init.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43/debugfs.c | 36
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/debugfs.c | 35
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c | 15
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c | 5
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 6
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile | 6
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c | 26
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c | 13
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile | 4
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/3945-mac.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/4965-mac.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/common.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlegacy/debug.c | 34
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 74
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 139
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/Makefile | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 23
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/main.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/dvm/tt.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/commands.h | 23
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h | 55
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/debug.h | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/location.h | 711
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h | 51
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/stats.h | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h | 19
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | 34
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/tof.h | 393
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 212
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 22
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 17
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 27
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-io.c | 41
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-io.h | 38
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 18
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/Makefile | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 813
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 91
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 200
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 249
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 203
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 116
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 26
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 104
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 377
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tdls.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tof.c | 305
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tof.h | 89
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 161
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 37
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 266
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 46
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 78
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 45
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 47
-rw-r--r--  drivers/net/wireless/marvell/libertas/debugfs.c | 6
-rw-r--r--  drivers/net/wireless/marvell/libertas/mesh.c | 5
-rw-r--r--  drivers/net/wireless/marvell/libertas_tf/main.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/Kconfig | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/debugfs.c | 5
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.c | 5
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sdio.h | 70
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c | 31
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 68
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 23
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 11
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02.h | 22
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | 222
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mac.h | 12
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 142
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_phy.c | 47
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_phy.h | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_regs.h | 38
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 89
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/init.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h | 23
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | 58
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/phy.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c | 17
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb.c | 20
-rw-r--r--  drivers/net/wireless/mediatek/mt76/util.c | 42
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/dma.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/eeprom.h | 2
-rw-r--r--  drivers/net/wireless/quantenna/Makefile | 1
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/bus.h | 19
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 43
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.h | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.c | 63
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/commands.h | 22
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.c | 21
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.h | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/debug.c | 31
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/debug.h | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/event.c | 96
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/event.h | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c | 6
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink.h | 27
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink_util.c | 16
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink_util.h | 28
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/trans.c | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/trans.h | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/util.c | 17
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/util.h | 17
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 27
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt61pci.c | 93
-rw-r--r--  drivers/net/wireless/ray_cs.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile | 2
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/core.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/debug.c | 14
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c | 95
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 8
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 6
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_debugfs.c | 5
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_hal.c | 3
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 67
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_main.c | 1
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 133
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_sdio.c | 15
-rw-r--r--  drivers/net/wireless/rsi/rsi_main.h | 22
-rw-r--r--  drivers/net/wireless/rsi/rsi_mgmt.h | 35
-rw-r--r--  drivers/net/wireless/st/cw1200/debug.c | 26
-rw-r--r--  drivers/net/wireless/st/cw1200/fwio.c | 4
-rw-r--r--  drivers/net/wireless/st/cw1200/queue.c | 1
-rw-r--r--  drivers/net/wireless/st/cw1200/scan.c | 5
-rw-r--r--  drivers/net/wireless/ti/wl1251/debugfs.c | 59
-rw-r--r--  drivers/net/wireless/ti/wl12xx/debugfs.c | 20
-rw-r--r--  drivers/net/wireless/ti/wl18xx/debugfs.c | 20
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/debugfs.c | 28
-rw-r--r--  drivers/net/wireless/ti/wlcore/debugfs.h | 10
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 7
-rw-r--r--  drivers/of/of_mdio.c | 9
-rw-r--r--  drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 16
-rw-r--r--  include/linux/bcma/bcma.h | 11
-rw-r--r--  include/linux/ethtool.h | 15
-rw-r--r--  include/linux/netdevice.h | 9
-rw-r--r--  include/linux/phy.h | 134
-rw-r--r--  include/linux/phy_fixed.h | 8
-rw-r--r--  include/net/flow_offload.h | 203
-rw-r--r--  include/net/pkt_cls.h | 16
-rw-r--r--  include/net/switchdev.h | 11
-rw-r--r--  include/trace/events/devlink.h | 33
-rw-r--r--  net/bridge/br_switchdev.c | 9
-rw-r--r--  net/core/Makefile | 2
-rw-r--r--  net/core/dev.c | 57
-rw-r--r--  net/core/devlink.c | 1
-rw-r--r--  net/core/ethtool.c | 241
-rw-r--r--  net/core/flow_offload.c | 153
-rw-r--r--  net/core/net-sysfs.c | 12
-rw-r--r--  net/core/rtnetlink.c | 12
-rw-r--r--  net/dsa/slave.c | 18
-rw-r--r--  net/ipv4/ipmr.c | 13
-rw-r--r--  net/sched/cls_api.c | 116
-rw-r--r--  net/sched/cls_flower.c | 71
-rw-r--r--  net/switchdev/switchdev.c | 20
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh | 200
291 files changed, 8835 insertions(+), 6650 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/nixge.txt b/Documentation/devicetree/bindings/net/nixge.txt
index 44a7358b4399..85d7240a9b20 100644
--- a/Documentation/devicetree/bindings/net/nixge.txt
+++ b/Documentation/devicetree/bindings/net/nixge.txt
@@ -12,10 +12,15 @@ Required properties:
- interrupts: Should contain tx and rx interrupt
- interrupt-names: Should be "rx" and "tx"
- phy-mode: See ethernet.txt file in the same directory.
-- phy-handle: See ethernet.txt file in the same directory.
- nvmem-cells: Phandle of nvmem cell containing the MAC address
- nvmem-cell-names: Should be "address"
+Optional properties:
+- mdio subnode to indicate presence of MDIO controller
+- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
+ Use instead of phy-handle.
+- phy-handle: See ethernet.txt file in the same directory.
+
Examples (10G generic PHY):
nixge0: ethernet@40000000 {
compatible = "ni,xge-enet-3.00";
@@ -33,8 +38,55 @@ Examples (10G generic PHY):
phy-mode = "xgmii";
phy-handle = <&ethernet_phy1>;
- ethernet_phy1: ethernet-phy@4 {
- compatible = "ethernet-phy-ieee802.3-c45";
- reg = <4>;
+ mdio {
+ ethernet_phy1: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <4>;
+ };
};
};
+
+Examples (10G generic PHY, no MDIO):
+ nixge0: ethernet@40000000 {
+ compatible = "ni,xge-enet-2.00";
+ reg = <0x40000000 0x6000>;
+
+ nvmem-cells = <&eth1_addr>;
+ nvmem-cell-names = "address";
+
+ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>, <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx", "tx";
+ interrupt-parent = <&intc>;
+
+ phy-mode = "xgmii";
+ phy-handle = <&ethernet_phy1>;
+ };
+
+Examples (1G generic fixed-link + MDIO):
+ nixge0: ethernet@40000000 {
+ compatible = "ni,xge-enet-2.00";
+ reg = <0x40000000 0x6000>;
+
+ nvmem-cells = <&eth1_addr>;
+ nvmem-cell-names = "address";
+
+ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>, <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx", "tx";
+ interrupt-parent = <&intc>;
+
+ phy-mode = "xgmii";
+
+ fixed-link {
+ speed = <1000>;
+ pause;
+ link-gpios = <&gpio0 63 GPIO_ACTIVE_HIGH>;
+ };
+
+ mdio {
+ ethernet_phy1: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <4>;
+ };
+ };
+
+ };
diff --git a/Documentation/networking/device_drivers/intel/e100.rst b/Documentation/networking/device_drivers/intel/e100.rst
index 5e2839b4ec92..2b9f4887beda 100644
--- a/Documentation/networking/device_drivers/intel/e100.rst
+++ b/Documentation/networking/device_drivers/intel/e100.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+==============================================================
Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
==============================================================
diff --git a/Documentation/networking/device_drivers/intel/e1000.rst b/Documentation/networking/device_drivers/intel/e1000.rst
index 6379d4d20771..956560b6e745 100644
--- a/Documentation/networking/device_drivers/intel/e1000.rst
+++ b/Documentation/networking/device_drivers/intel/e1000.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+===========================================================
Linux* Base Driver for Intel(R) Ethernet Network Connection
===========================================================
diff --git a/Documentation/networking/device_drivers/intel/e1000e.rst b/Documentation/networking/device_drivers/intel/e1000e.rst
index 33554e5416c5..01999f05509c 100644
--- a/Documentation/networking/device_drivers/intel/e1000e.rst
+++ b/Documentation/networking/device_drivers/intel/e1000e.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+======================================================
Linux* Driver for Intel(R) Ethernet Network Connection
======================================================
diff --git a/Documentation/networking/device_drivers/intel/fm10k.rst b/Documentation/networking/device_drivers/intel/fm10k.rst
index bf5e5942f28d..ac3269e34f55 100644
--- a/Documentation/networking/device_drivers/intel/fm10k.rst
+++ b/Documentation/networking/device_drivers/intel/fm10k.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+==============================================================
Linux* Base Driver for Intel(R) Ethernet Multi-host Controller
==============================================================
diff --git a/Documentation/networking/device_drivers/intel/i40e.rst b/Documentation/networking/device_drivers/intel/i40e.rst
index 0cc16c525d10..848fd388fa6e 100644
--- a/Documentation/networking/device_drivers/intel/i40e.rst
+++ b/Documentation/networking/device_drivers/intel/i40e.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+==================================================================
Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series
==================================================================
diff --git a/Documentation/networking/device_drivers/intel/iavf.rst b/Documentation/networking/device_drivers/intel/iavf.rst
index f8b42b64eb28..2d0c3baa1752 100644
--- a/Documentation/networking/device_drivers/intel/iavf.rst
+++ b/Documentation/networking/device_drivers/intel/iavf.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+==================================================================
Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function
==================================================================
diff --git a/Documentation/networking/device_drivers/intel/ice.rst b/Documentation/networking/device_drivers/intel/ice.rst
index 4d118b827bbb..c220aa2711c6 100644
--- a/Documentation/networking/device_drivers/intel/ice.rst
+++ b/Documentation/networking/device_drivers/intel/ice.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+===================================================================
Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series
===================================================================
diff --git a/Documentation/networking/device_drivers/intel/igb.rst b/Documentation/networking/device_drivers/intel/igb.rst
index e87a4a72ea2d..fc8cfaa5dcfa 100644
--- a/Documentation/networking/device_drivers/intel/igb.rst
+++ b/Documentation/networking/device_drivers/intel/igb.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+===========================================================
Linux* Base Driver for Intel(R) Ethernet Network Connection
===========================================================
diff --git a/Documentation/networking/device_drivers/intel/igbvf.rst b/Documentation/networking/device_drivers/intel/igbvf.rst
index a8a9ffa4f8d3..9cddabe8108e 100644
--- a/Documentation/networking/device_drivers/intel/igbvf.rst
+++ b/Documentation/networking/device_drivers/intel/igbvf.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+============================================================
Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet
============================================================
diff --git a/Documentation/networking/device_drivers/intel/ixgb.rst b/Documentation/networking/device_drivers/intel/ixgb.rst
index 8bd80e27843d..945018207a92 100644
--- a/Documentation/networking/device_drivers/intel/ixgb.rst
+++ b/Documentation/networking/device_drivers/intel/ixgb.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+=====================================================================
Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection
=====================================================================
diff --git a/Documentation/networking/device_drivers/intel/ixgbe.rst b/Documentation/networking/device_drivers/intel/ixgbe.rst
index 86d887a63606..c7d25483fedb 100644
--- a/Documentation/networking/device_drivers/intel/ixgbe.rst
+++ b/Documentation/networking/device_drivers/intel/ixgbe.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+=============================================================================
Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
=============================================================================
diff --git a/Documentation/networking/device_drivers/intel/ixgbevf.rst b/Documentation/networking/device_drivers/intel/ixgbevf.rst
index 56cde6366c2f..5d4977360157 100644
--- a/Documentation/networking/device_drivers/intel/ixgbevf.rst
+++ b/Documentation/networking/device_drivers/intel/ixgbevf.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0+
+=============================================================
Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet
=============================================================
diff --git a/Documentation/networking/device_drivers/stmicro/stmmac.txt b/Documentation/networking/device_drivers/stmicro/stmmac.txt
index 2bb07078f535..1ae979fd90d2 100644
--- a/Documentation/networking/device_drivers/stmicro/stmmac.txt
+++ b/Documentation/networking/device_drivers/stmicro/stmmac.txt
@@ -267,7 +267,7 @@ static struct fixed_phy_status stmmac0_fixed_phy_status = {
During the board's device_init we can configure the first
MAC for fixed_link by calling:
- fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status, -1);
+ fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status);
and the second one, with a real PHY device attached to the bus,
by using the stmmac_mdio_bus_data structure (to provide the id, the
reset procedure etc).
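
For context on the API change applied above and in the arch/ and driver call sites that follow: fixed_phy_add() and fixed_phy_register() lose their trailing GPIO argument in this series. A minimal, hypothetical board-code sketch of the new calling convention (the myboard_* names are illustrative, not from the tree):

#include <linux/init.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

static struct fixed_phy_status myboard_fixed_phy_status __initdata = {
	.link	= 1,
	.speed	= 100,
	.duplex	= 1,
};

static int __init myboard_init(void)
{
	/*
	 * Before this series the call carried a trailing GPIO of -1.
	 * Drivers that register the PHY directly use the same shape:
	 *   fixed_phy_register(PHY_POLL, &status, NULL);
	 */
	return fixed_phy_add(PHY_POLL, 0, &myboard_fixed_phy_status);
}
device_initcall(myboard_init);
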
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index ad1185c68df7..6b3ab583c698 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -127,7 +127,7 @@ static struct fixed_phy_status nettel_fixed_phy_status __initdata = {
static int __init init_BSP(void)
{
m5272_uarts_init();
- fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status, -1);
+ fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
return 0;
}
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index f09262e0a72f..10ff07b7721e 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -683,7 +683,7 @@ static int __init ar7_register_devices(void)
if (ar7_has_high_cpmac()) {
res = fixed_phy_add(PHY_POLL, cpmac_high.id,
- &fixed_phy_status, -1);
+ &fixed_phy_status);
if (!res) {
cpmac_get_mac(1, cpmac_high_data.dev_addr);
@@ -696,7 +696,7 @@ static int __init ar7_register_devices(void)
} else
cpmac_low_data.phy_mask = 0xffffffff;
- res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status, -1);
+ res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status);
if (!res) {
cpmac_get_mac(0, cpmac_low_data.dev_addr);
res = platform_device_register(&cpmac_low);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index fe3773539eff..82627c264964 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -274,7 +274,7 @@ static int __init bcm47xx_register_bus_complete(void)
bcm47xx_leds_register();
bcm47xx_workarounds();
- fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status, -1);
+ fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
return 0;
}
device_initcall(bcm47xx_register_bus_complete);
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index a4aac370f21f..6eded32d1aac 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -10,13 +10,13 @@
#include <linux/delay.h>
#define bcma_err(bus, fmt, ...) \
- pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ dev_err((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
- pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ dev_warn((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_info(bus, fmt, ...) \
- pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ dev_info((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_debug(bus, fmt, ...) \
- pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ dev_dbg((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
struct bcma_bus;
@@ -33,7 +33,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus);
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
-struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
/* scan.c */
void bcma_detect_chip(struct bcma_bus *bus);
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 2c0ffb77d738..a5df3d111334 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -183,7 +183,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->owner = THIS_MODULE;
- chip->parent = bcma_bus_get_host_dev(bus);
+ chip->parent = bus->dev;
#if IS_BUILTIN(CONFIG_OF)
chip->of_node = cc->core->dev.of_node;
#endif
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 63410ecfe640..f52239feb4cb 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -196,6 +196,8 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
goto err_pci_release_regions;
}
+ bus->dev = &dev->dev;
+
/* Map MMIO */
err = -ENOMEM;
bus->mmio = pci_iomap(dev, 0, ~0UL);
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 2dce34789329..c8073b509a2b 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -179,7 +179,6 @@ int __init bcma_host_soc_register(struct bcma_soc *soc)
/* Host specific */
bus->hosttype = BCMA_HOSTTYPE_SOC;
bus->ops = &bcma_host_soc_ops;
- bus->host_pdev = NULL;
/* Initialize struct, detect chip */
bcma_init_bus(bus);
@@ -213,6 +212,8 @@ static int bcma_host_soc_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
+ bus->dev = dev;
+
/* Map MMIO */
bus->mmio = of_iomap(np, 0);
if (!bus->mmio)
@@ -221,7 +222,6 @@ static int bcma_host_soc_probe(struct platform_device *pdev)
/* Host specific */
bus->hosttype = BCMA_HOSTTYPE_SOC;
bus->ops = &bcma_host_soc_ops;
- bus->host_pdev = pdev;
/* Initialize struct, detect chip */
bcma_init_bus(bus);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index fc1f4acdd189..6535614a7dc1 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -223,8 +223,8 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num)
mips_irq = bcma_core_mips_irq(core);
return mips_irq <= 4 ? mips_irq + 2 : 0;
}
- if (bus->host_pdev)
- return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
+ if (bus->dev)
+ return bcma_of_get_irq(bus->dev, core, num);
return 0;
case BCMA_HOSTTYPE_SDIO:
return 0;
@@ -239,18 +239,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
- core->dev.parent = bcma_bus_get_host_dev(bus);
- if (core->dev.parent)
- bcma_of_fill_device(core->dev.parent, core);
+ core->dev.parent = bus->dev;
+ if (bus->dev)
+ bcma_of_fill_device(bus->dev, core);
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
- core->dma_dev = &bus->host_pci->dev;
+ core->dma_dev = bus->dev;
core->irq = bus->host_pci->irq;
break;
case BCMA_HOSTTYPE_SOC:
- if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
- core->dma_dev = &bus->host_pdev->dev;
+ if (IS_ENABLED(CONFIG_OF) && bus->dev) {
+ core->dma_dev = bus->dev;
} else {
core->dev.dma_mask = &core->dev.coherent_dma_mask;
core->dma_dev = &core->dev;
@@ -261,28 +261,6 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
}
}
-struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
-{
- switch (bus->hosttype) {
- case BCMA_HOSTTYPE_PCI:
- if (bus->host_pci)
- return &bus->host_pci->dev;
- else
- return NULL;
- case BCMA_HOSTTYPE_SOC:
- if (bus->host_pdev)
- return &bus->host_pdev->dev;
- else
- return NULL;
- case BCMA_HOSTTYPE_SDIO:
- if (bus->host_sdio)
- return &bus->host_sdio->dev;
- else
- return NULL;
- }
- return NULL;
-}
-
void bcma_init_bus(struct bcma_bus *bus)
{
mutex_lock(&bcma_buses_mutex);
@@ -402,7 +380,6 @@ int bcma_bus_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
- struct device *dev;
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
@@ -425,10 +402,8 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
- dev = bcma_bus_get_host_dev(bus);
- if (dev) {
- of_platform_default_populate(dev->of_node, NULL, dev);
- }
+ if (bus->dev)
+ of_platform_default_populate(bus->dev->of_node, NULL, bus->dev);
/* Cores providing flash access go before SPROM init */
list_for_each_entry(core, &bus->cores, list) {
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index e14663ab6dbc..6d8059dc77b7 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
+#include <net/flow_offload.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -257,7 +258,8 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
}
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
- struct ethtool_tcpip4_spec *v4_spec,
+ struct flow_dissector_key_ipv4_addrs *addrs,
+ struct flow_dissector_key_ports *ports,
unsigned int slice_num,
bool mask)
{
@@ -278,7 +280,7 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
* UDF_n_A6 [23:8]
* UDF_n_A5 [7:0]
*/
- reg = be16_to_cpu(v4_spec->pdst) >> 8;
+ reg = be16_to_cpu(ports->dst) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(3);
else
@@ -289,9 +291,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
* UDF_n_A4 [23:8]
* UDF_n_A3 [7:0]
*/
- reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
- (u32)be16_to_cpu(v4_spec->psrc) << 8 |
- (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+ reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
+ (u32)be16_to_cpu(ports->src) << 8 |
+ (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(2);
else
@@ -302,9 +304,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
* UDF_n_A2 [23:8]
* UDF_n_A1 [7:0]
*/
- reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
- (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
- (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+ reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
+ (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
+ (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
if (mask)
offset = CORE_CFP_MASK_PORT(1);
else
@@ -317,8 +319,8 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
* Slice ID [3:2]
* Slice valid [1:0]
*/
- reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
- (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+ reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
+ (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
SLICE_NUM(slice_num) | SLICE_VALID;
if (mask)
offset = CORE_CFP_MASK_PORT(0);
@@ -332,9 +334,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int queue_num,
struct ethtool_rx_flow_spec *fs)
{
- struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+ struct ethtool_rx_flow_spec_input input = {};
const struct cfp_udf_layout *layout;
unsigned int slice_num, rule_index;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_match_ipv4_addrs ipv4;
+ struct flow_match_ports ports;
+ struct flow_match_ip ip;
u8 ip_proto, ip_frag;
u8 num_udf;
u32 reg;
@@ -343,13 +349,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
ip_proto = IPPROTO_TCP;
- v4_spec = &fs->h_u.tcp_ip4_spec;
- v4_m_spec = &fs->m_u.tcp_ip4_spec;
break;
case UDP_V4_FLOW:
ip_proto = IPPROTO_UDP;
- v4_spec = &fs->h_u.udp_ip4_spec;
- v4_m_spec = &fs->m_u.udp_ip4_spec;
break;
default:
return -EINVAL;
@@ -367,11 +369,22 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
if (rule_index > bcm_sf2_cfp_rule_size(priv))
return -ENOSPC;
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
+ flow_rule_match_ports(flow->rule, &ports);
+ flow_rule_match_ip(flow->rule, &ip);
+
layout = &udf_tcpip4_layout;
/* We only use one UDF slice for now */
slice_num = bcm_sf2_get_slice_number(layout, 0);
- if (slice_num == UDF_NUM_SLICES)
- return -EINVAL;
+ if (slice_num == UDF_NUM_SLICES) {
+ ret = -EINVAL;
+ goto out_err_flow_rule;
+ }
num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -398,7 +411,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
* Reserved [1]
* UDF_Valid[8] [0]
*/
- core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
+ core_writel(priv, ip.key->tos << IPTOS_SHIFT |
ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
udf_upper_bits(num_udf),
CORE_CFP_DATA_PORT(6));
@@ -417,8 +430,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
/* Program the match and the mask */
- bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
- bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index);
@@ -426,14 +439,14 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index);
- return ret;
+ goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now */
ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
queue_num, true);
if (ret)
- return ret;
+ goto out_err_flow_rule;
/* Turn on CFP for this rule now */
reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -446,6 +459,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
fs->location = rule_index;
return 0;
+
+out_err_flow_rule:
+ ethtool_rx_flow_rule_destroy(flow);
+ return ret;
}
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
@@ -582,8 +599,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+ struct ethtool_rx_flow_spec_input input = {};
unsigned int slice_num, rule_index[2];
const struct cfp_udf_layout *layout;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_match_ipv6_addrs ipv6;
+ struct flow_match_ports ports;
u8 ip_proto, ip_frag;
int ret = 0;
u8 num_udf;
@@ -645,6 +666,15 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
goto out_err;
}
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow)) {
+ ret = PTR_ERR(flow);
+ goto out_err;
+ }
+ flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
+ flow_rule_match_ports(flow->rule, &ports);
+
/* Apply the UDF layout for this filter */
bcm_sf2_cfp_udf_set(priv, layout, slice_num);
@@ -688,10 +718,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
/* Slice the IPv6 source address and port */
- bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
- slice_num, false);
- bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
- SLICE_NUM_MASK, true);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
+ ports.key->src, slice_num, false);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
+ ports.mask->src, SLICE_NUM_MASK, true);
/* Insert into TCAM now because we need to insert a second rule */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,20 +729,20 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
- goto out_err;
+ goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now */
ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
queue_num, false);
if (ret)
- goto out_err;
+ goto out_err_flow_rule;
/* Now deal with the second slice to chain this rule */
slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
if (slice_num == UDF_NUM_SLICES) {
ret = -EINVAL;
- goto out_err;
+ goto out_err_flow_rule;
}
num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -748,10 +778,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
/* Mask all */
core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
- bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
- false);
- bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
- SLICE_NUM_MASK, true);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
+ ports.key->dst, slice_num, false);
+ bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
+ ports.key->dst, SLICE_NUM_MASK, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
@@ -759,7 +789,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
- goto out_err;
+ goto out_err_flow_rule;
}
/* Insert into Action and policer RAMs now, set chain ID to
@@ -768,7 +798,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
queue_num, true);
if (ret)
- goto out_err;
+ goto out_err_flow_rule;
/* Turn on CFP for this rule now */
reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -784,6 +814,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
return ret;
+out_err_flow_rule:
+ ethtool_rx_flow_rule_destroy(flow);
out_err:
clear_bit(rule_index[1], priv->cfp.used);
return ret;
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 816f34d64736..17482ae09aa5 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -343,7 +343,7 @@ static int __init dsa_loop_init(void)
unsigned int i;
for (i = 0; i < NUM_FIXED_PHYS; i++)
- phydevs[i] = fixed_phy_register(PHY_POLL, &status, -1, NULL);
+ phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
return mdio_driver_register(&dsa_loop_drv);
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 2d3a44c40221..4632dd5dbad1 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1446,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
+ phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6a512871176b..1c2987c3d708 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9981,8 +9981,11 @@ static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
return 0;
}
-int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
+int bnxt_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
+ struct bnxt *bp = netdev_priv(dev);
+
if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return -EOPNOTSUPP;
@@ -9990,27 +9993,12 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(bp->switch_id);
- memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
+ ppid->id_len = sizeof(bp->switch_id);
+ memcpy(ppid->id, bp->switch_id, ppid->id_len);
-static int bnxt_swdev_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- return bnxt_port_attr_get(netdev_priv(dev), attr);
+ return 0;
}
-static const struct switchdev_ops bnxt_switchdev_ops = {
- .switchdev_port_attr_get = bnxt_swdev_port_attr_get
-};
-
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -10042,6 +10030,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bpf = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
+ .ndo_get_port_parent_id = bnxt_get_port_parent_id,
.ndo_get_phys_port_name = bnxt_get_phys_port_name
};
@@ -10400,7 +10389,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &bnxt_netdev_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
- SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5c886a700bcc..17554d4be651 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -22,7 +22,6 @@
#include <linux/rhashtable.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
-#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
@@ -1795,7 +1794,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
-int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+int bnxt_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
void bnxt_dim_work(struct work_struct *work);
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index c683b5e96b1d..61a3457ed71b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -45,7 +45,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
struct bnxt *bp;
/* check if dev belongs to the same switch */
- if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
+ if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
dev->ifindex);
return BNXT_FID_INVALID;
@@ -61,9 +61,9 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
static int bnxt_tc_parse_redir(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+ const struct flow_action_entry *act)
{
- struct net_device *dev = tcf_mirred_dev(tc_act);
+ struct net_device *dev = act->dev;
if (!dev) {
netdev_info(bp->dev, "no dev in mirred action");
@@ -77,16 +77,16 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
static int bnxt_tc_parse_vlan(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+ const struct flow_action_entry *act)
{
- switch (tcf_vlan_action(tc_act)) {
- case TCA_VLAN_ACT_POP:
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
break;
- case TCA_VLAN_ACT_PUSH:
+ case FLOW_ACTION_VLAN_PUSH:
actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
- actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
- actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+ actions->push_vlan_tci = htons(act->vlan.vid);
+ actions->push_vlan_tpid = act->vlan.proto;
break;
default:
return -EOPNOTSUPP;
@@ -96,10 +96,10 @@ static int bnxt_tc_parse_vlan(struct bnxt *bp,
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+ const struct flow_action_entry *act)
{
- struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
- struct ip_tunnel_key *tun_key = &tun_info->key;
+ const struct ip_tunnel_info *tun_info = act->tunnel;
+ const struct ip_tunnel_key *tun_key = &tun_info->key;
if (ip_tunnel_info_af(tun_info) != AF_INET) {
netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
@@ -113,51 +113,43 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- struct tcf_exts *tc_exts)
+ struct flow_action *flow_action)
{
- const struct tc_action *tc_act;
+ struct flow_action_entry *act;
int i, rc;
- if (!tcf_exts_has_actions(tc_exts)) {
+ if (!flow_action_has_entries(flow_action)) {
netdev_info(bp->dev, "no actions");
return -EINVAL;
}
- tcf_exts_for_each_action(i, tc_act, tc_exts) {
- /* Drop action */
- if (is_tcf_gact_shot(tc_act)) {
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
return 0; /* don't bother with other actions */
- }
-
- /* Redirect action */
- if (is_tcf_mirred_egress_redirect(tc_act)) {
- rc = bnxt_tc_parse_redir(bp, actions, tc_act);
+ case FLOW_ACTION_REDIRECT:
+ rc = bnxt_tc_parse_redir(bp, actions, act);
if (rc)
return rc;
- continue;
- }
-
- /* Push/pop VLAN */
- if (is_tcf_vlan(tc_act)) {
- rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE:
+ rc = bnxt_tc_parse_vlan(bp, actions, act);
if (rc)
return rc;
- continue;
- }
-
- /* Tunnel encap */
- if (is_tcf_tunnel_set(tc_act)) {
- rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
+ break;
+ case FLOW_ACTION_TUNNEL_ENCAP:
+ rc = bnxt_tc_parse_tunnel_set(bp, actions, act);
if (rc)
return rc;
- continue;
- }
-
- /* Tunnel decap */
- if (is_tcf_tunnel_release(tc_act)) {
+ break;
+ case FLOW_ACTION_TUNNEL_DECAP:
actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
- continue;
+ break;
+ default:
+ break;
}
}
@@ -177,18 +169,12 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
return 0;
}
-#define GET_KEY(flow_cmd, key_type) \
- skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
- (flow_cmd)->key)
-#define GET_MASK(flow_cmd, key_type) \
- skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
- (flow_cmd)->mask)
-
static int bnxt_tc_parse_flow(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd,
struct bnxt_tc_flow *flow)
{
- struct flow_dissector *dissector = tc_flow_cmd->dissector;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(tc_flow_cmd);
+ struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
@@ -198,143 +184,123 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return -EOPNOTSUPP;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
- struct flow_dissector_key_basic *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- flow->l2_key.ether_type = key->n_proto;
- flow->l2_mask.ether_type = mask->n_proto;
+ flow_rule_match_basic(rule, &match);
+ flow->l2_key.ether_type = match.key->n_proto;
+ flow->l2_mask.ether_type = match.mask->n_proto;
- if (key->n_proto == htons(ETH_P_IP) ||
- key->n_proto == htons(ETH_P_IPV6)) {
- flow->l4_key.ip_proto = key->ip_proto;
- flow->l4_mask.ip_proto = mask->ip_proto;
+ if (match.key->n_proto == htons(ETH_P_IP) ||
+ match.key->n_proto == htons(ETH_P_IPV6)) {
+ flow->l4_key.ip_proto = match.key->ip_proto;
+ flow->l4_mask.ip_proto = match.mask->ip_proto;
}
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
- struct flow_dissector_key_eth_addrs *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+ flow_rule_match_eth_addrs(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
- ether_addr_copy(flow->l2_key.dmac, key->dst);
- ether_addr_copy(flow->l2_mask.dmac, mask->dst);
- ether_addr_copy(flow->l2_key.smac, key->src);
- ether_addr_copy(flow->l2_mask.smac, mask->src);
+ ether_addr_copy(flow->l2_key.dmac, match.key->dst);
+ ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
+ ether_addr_copy(flow->l2_key.smac, match.key->src);
+ ether_addr_copy(flow->l2_mask.smac, match.mask->src);
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
- struct flow_dissector_key_vlan *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+ flow_rule_match_vlan(rule, &match);
flow->l2_key.inner_vlan_tci =
- cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
+ cpu_to_be16(VLAN_TCI(match.key->vlan_id,
+ match.key->vlan_priority));
flow->l2_mask.inner_vlan_tci =
- cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
+ cpu_to_be16((VLAN_TCI(match.mask->vlan_id,
+ match.mask->vlan_priority)));
flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
flow->l2_mask.inner_vlan_tpid = htons(0xffff);
flow->l2_key.num_vlans = 1;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- struct flow_dissector_key_ipv4_addrs *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
- struct flow_dissector_key_ipv4_addrs *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+ flow_rule_match_ipv4_addrs(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
- flow->l3_key.ipv4.daddr.s_addr = key->dst;
- flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
- flow->l3_key.ipv4.saddr.s_addr = key->src;
- flow->l3_mask.ipv4.saddr.s_addr = mask->src;
- } else if (dissector_uses_key(dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- struct flow_dissector_key_ipv6_addrs *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
- struct flow_dissector_key_ipv6_addrs *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
-
+ flow->l3_key.ipv4.daddr.s_addr = match.key->dst;
+ flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst;
+ flow->l3_key.ipv4.saddr.s_addr = match.key->src;
+ flow->l3_mask.ipv4.saddr.s_addr = match.mask->src;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
- flow->l3_key.ipv6.daddr = key->dst;
- flow->l3_mask.ipv6.daddr = mask->dst;
- flow->l3_key.ipv6.saddr = key->src;
- flow->l3_mask.ipv6.saddr = mask->src;
+ flow->l3_key.ipv6.daddr = match.key->dst;
+ flow->l3_mask.ipv6.daddr = match.mask->dst;
+ flow->l3_key.ipv6.saddr = match.key->src;
+ flow->l3_mask.ipv6.saddr = match.mask->src;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
- struct flow_dissector_key_ports *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+ flow_rule_match_ports(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
- flow->l4_key.ports.dport = key->dst;
- flow->l4_mask.ports.dport = mask->dst;
- flow->l4_key.ports.sport = key->src;
- flow->l4_mask.ports.sport = mask->src;
+ flow->l4_key.ports.dport = match.key->dst;
+ flow->l4_mask.ports.dport = match.mask->dst;
+ flow->l4_key.ports.sport = match.key->src;
+ flow->l4_mask.ports.sport = match.mask->src;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
- struct flow_dissector_key_icmp *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
- struct flow_dissector_key_icmp *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+ flow_rule_match_icmp(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
- flow->l4_key.icmp.type = key->type;
- flow->l4_key.icmp.code = key->code;
- flow->l4_mask.icmp.type = mask->type;
- flow->l4_mask.icmp.code = mask->code;
+ flow->l4_key.icmp.type = match.key->type;
+ flow->l4_key.icmp.code = match.key->code;
+ flow->l4_mask.icmp.type = match.mask->type;
+ flow->l4_mask.icmp.code = match.mask->code;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- struct flow_dissector_key_ipv4_addrs *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
- struct flow_dissector_key_ipv4_addrs *mask =
- GET_MASK(tc_flow_cmd,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
- flow->tun_key.u.ipv4.dst = key->dst;
- flow->tun_mask.u.ipv4.dst = mask->dst;
- flow->tun_key.u.ipv4.src = key->src;
- flow->tun_mask.u.ipv4.src = mask->src;
- } else if (dissector_uses_key(dissector,
+ flow->tun_key.u.ipv4.dst = match.key->dst;
+ flow->tun_mask.u.ipv4.dst = match.mask->dst;
+ flow->tun_key.u.ipv4.src = match.key->src;
+ flow->tun_mask.u.ipv4.src = match.mask->src;
+ } else if (flow_rule_match_key(rule,
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
return -EOPNOTSUPP;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
- struct flow_dissector_key_keyid *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+ flow_rule_match_enc_keyid(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
- flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
- flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
+ flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid);
+ flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid);
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- struct flow_dissector_key_ports *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
- struct flow_dissector_key_ports *mask =
- GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ struct flow_match_ports match;
+ flow_rule_match_enc_ports(rule, &match);
flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
- flow->tun_key.tp_dst = key->dst;
- flow->tun_mask.tp_dst = mask->dst;
- flow->tun_key.tp_src = key->src;
- flow->tun_mask.tp_src = mask->src;
+ flow->tun_key.tp_dst = match.key->dst;
+ flow->tun_mask.tp_dst = match.mask->dst;
+ flow->tun_key.tp_src = match.key->src;
+ flow->tun_mask.tp_src = match.mask->src;
}
- return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
+ return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
@@ -1422,8 +1388,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
lastused = flow->lastused;
spin_unlock(&flow->stats_lock);
- tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
- lastused);
+ flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
+ lastused);
return 0;
}
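The bnxt_tc.c hunks above replace the open-coded GET_KEY()/GET_MASK() dissector lookups with the flow_rule match helpers, and the stats path moves from tcf_exts_stats_update() to flow_stats_update(). A minimal sketch of the new match pattern, outside any driver (flow_rule_match_key(), flow_rule_match_ports() and tc_cls_flower_offload_flow_rule() are the in-tree helpers of this series; the parse function itself is hypothetical):

#include <linux/errno.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int example_parse_ports(struct tc_cls_flower_offload *f,
			       __be16 *dport, __be16 *sport)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return -EOPNOTSUPP;	/* rule does not match on L4 ports */

	/* key and mask come back together in one struct */
	flow_rule_match_ports(rule, &match);
	*dport = match.key->dst;	/* match.mask->dst says which bits matter */
	*sport = match.key->src;
	return 0;
}

The same key/mask pairing applies to every flow_rule_match_*() helper used by the drivers converted in this series.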
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 9a25c05aa571..2bdd2da9aac7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -237,21 +237,17 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
-static int bnxt_vf_rep_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
+static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
/* as only PORT_PARENT_ID is supported currently use common code
* between PF and VF-rep for now.
*/
- return bnxt_port_attr_get(vf_rep->bp, attr);
+ return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid);
}
-static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = {
- .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get
-};
-
static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = {
.get_drvinfo = bnxt_vf_rep_get_drvinfo
};
@@ -262,6 +258,7 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
.ndo_start_xmit = bnxt_vf_rep_xmit,
.ndo_get_stats64 = bnxt_vf_rep_get_stats64,
.ndo_setup_tc = bnxt_vf_rep_setup_tc,
+ .ndo_get_port_parent_id = bnxt_vf_rep_get_port_parent_id,
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};
@@ -392,7 +389,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
- SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops);
/* Just inherit all the featues of the parent PF as the VF-R
* uses the RX/TX rings of the parent PF
*/
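As in the liquidio hunks further down, the per-driver switchdev_ops structure disappears and PORT_PARENT_ID is reported through the new ndo_get_port_parent_id callback instead. A minimal, hypothetical representor sketch of that shape (ndo_get_port_parent_id and struct netdev_phys_item_id are the real kernel interfaces; the switch ID value and the ops name are made up for illustration):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_get_port_parent_id(struct net_device *dev,
				      struct netdev_phys_item_id *ppid)
{
	/* hypothetical switch ID; real drivers derive it from hardware */
	static const u8 switch_id[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
	};

	ppid->id_len = ETH_ALEN;
	memcpy(ppid->id, switch_id, ppid->id_len);
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_port_parent_id	= example_get_port_parent_id,
};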
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index aceb9b7b55bd..51880d83131a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -525,7 +525,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
.asym_pause = 0,
};
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phydev || IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return -ENODEV;
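This one-liner only tracks the new fixed_phy_register() prototype, which dropped the GPIO argument (GPIO-backed fixed links are expected to use the separate gpiod-based helper instead). A minimal registration sketch against the three-argument form, with illustrative status values:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

static struct phy_device *example_register_fixed_phy(void)
{
	struct fixed_phy_status status = {
		.link	= 1,
		.speed	= SPEED_1000,	/* illustrative values */
		.duplex	= DUPLEX_FULL,
	};

	/* arguments are now: irq, status, device_node */
	return fixed_phy_register(PHY_POLL, &status, NULL);
}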
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 3d24133e5e49..e97e6754ee09 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -21,7 +21,6 @@
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
-#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -3184,7 +3183,8 @@ static const struct devlink_ops liquidio_devlink_ops = {
};
static int
-lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+liquidio_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct lio *lio = GET_LIO(dev);
struct octeon_device *oct = lio->oct_dev;
@@ -3192,24 +3192,12 @@ lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return -EOPNOTSUPP;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id,
- (void *)&lio->linfo.hw_addr + 2);
- break;
-
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = ETH_ALEN;
+ ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
return 0;
}
-static const struct switchdev_ops lio_pf_switchdev_ops = {
- .switchdev_port_attr_get = lio_pf_switchdev_attr_get,
-};
-
static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
struct ifla_vf_stats *vf_stats)
{
@@ -3259,6 +3247,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_vf_trust = liquidio_set_vf_trust,
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
.ndo_get_vf_stats = liquidio_get_vf_stats,
+ .ndo_get_port_parent_id = liquidio_get_port_parent_id,
};
/** \brief Entry point for the liquidio module
@@ -3534,7 +3523,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
* netdev tasks.
*/
netdev->netdev_ops = &lionetdevops;
- SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
if (retval) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index de61060721c4..f3f2e71431ac 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -25,7 +25,6 @@
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
-#include <net/switchdev.h>
#include "lio_vf_rep.h"
static int lio_vf_rep_open(struct net_device *ndev);
@@ -38,6 +37,8 @@ static int lio_vf_rep_phys_port_name(struct net_device *dev,
static void lio_vf_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
+static int lio_vf_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
static const struct net_device_ops lio_vf_rep_ndev_ops = {
.ndo_open = lio_vf_rep_open,
@@ -47,6 +48,7 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = {
.ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
.ndo_get_stats64 = lio_vf_rep_get_stats64,
.ndo_change_mtu = lio_vf_rep_change_mtu,
+ .ndo_get_port_parent_id = lio_vf_get_port_parent_id,
};
static int
@@ -443,31 +445,19 @@ xmit_failed:
return NETDEV_TX_OK;
}
-static int
-lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int lio_vf_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
struct net_device *parent_ndev = vf_rep->parent_ndev;
struct lio *lio = GET_LIO(parent_ndev);
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id,
- (void *)&lio->linfo.hw_addr + 2);
- break;
-
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = ETH_ALEN;
+ ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
return 0;
}
-static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
- .switchdev_port_attr_get = lio_vf_rep_attr_get,
-};
-
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
@@ -524,7 +514,6 @@ lio_vf_rep_create(struct octeon_device *oct)
ndev->min_mtu = LIO_MIN_MTU_SIZE;
ndev->max_mtu = LIO_MAX_MTU_SIZE;
ndev->netdev_ops = &lio_vf_rep_ndev_ops;
- SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);
vf_rep = netdev_priv(ndev);
memset(vf_rep, 0, sizeof(*vf_rep));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index c116f96956fe..82a8d1970060 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -83,28 +83,23 @@ static void cxgb4_process_flow_match(struct net_device *dev,
struct tc_cls_flower_offload *cls,
struct ch_filter_specification *fs)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
u16 addr_type = 0;
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- cls->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->mask);
- u16 ethtype_key = ntohs(key->n_proto);
- u16 ethtype_mask = ntohs(mask->n_proto);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+ u16 ethtype_key, ethtype_mask;
+
+ flow_rule_match_basic(rule, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
if (ethtype_key == ETH_P_ALL) {
ethtype_key = 0;
@@ -116,115 +111,89 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->val.ethtype = ethtype_key;
fs->mask.ethtype = ethtype_mask;
- fs->val.proto = key->ip_proto;
- fs->mask.proto = mask->ip_proto;
+ fs->val.proto = match.key->ip_proto;
+ fs->mask.proto = match.mask->ip_proto;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- cls->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- cls->mask);
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
fs->type = 0;
- memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
- memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
- memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
- memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
+ memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
+ memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
+ memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));
/* also initialize nat_lip/fip to same values */
- memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
- memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
-
+ memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
+ memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- cls->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- cls->mask);
+ struct flow_match_ipv6_addrs match;
+ flow_rule_match_ipv6_addrs(rule, &match);
fs->type = 1;
- memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
- memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
- memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
- memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
+ memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
+ sizeof(match.key->dst));
+ memcpy(&fs->val.fip[0], match.key->src.s6_addr,
+ sizeof(match.key->src));
+ memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
+ sizeof(match.mask->dst));
+ memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
+ sizeof(match.mask->src));
/* also initialize nat_lip/fip to same values */
- memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
- memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
+ memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
+ sizeof(match.key->dst));
+ memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
+ sizeof(match.key->src));
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key, *mask;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- cls->mask);
- fs->val.lport = cpu_to_be16(key->dst);
- fs->mask.lport = cpu_to_be16(mask->dst);
- fs->val.fport = cpu_to_be16(key->src);
- fs->mask.fport = cpu_to_be16(mask->src);
+ flow_rule_match_ports(rule, &match);
+ fs->val.lport = cpu_to_be16(match.key->dst);
+ fs->mask.lport = cpu_to_be16(match.mask->dst);
+ fs->val.fport = cpu_to_be16(match.key->src);
+ fs->mask.fport = cpu_to_be16(match.mask->src);
/* also initialize nat_lport/fport to same values */
- fs->nat_lport = cpu_to_be16(key->dst);
- fs->nat_fport = cpu_to_be16(key->src);
+ fs->nat_lport = cpu_to_be16(match.key->dst);
+ fs->nat_fport = cpu_to_be16(match.key->src);
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *key, *mask;
-
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->mask);
- fs->val.tos = key->tos;
- fs->mask.tos = mask->tos;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ fs->val.tos = match.key->tos;
+ fs->mask.tos = match.mask->tos;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key, *mask;
-
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- cls->mask);
- fs->val.vni = be32_to_cpu(key->keyid);
- fs->mask.vni = be32_to_cpu(mask->keyid);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
+
+ flow_rule_match_enc_keyid(rule, &match);
+ fs->val.vni = be32_to_cpu(match.key->keyid);
+ fs->mask.vni = be32_to_cpu(match.mask->keyid);
if (fs->mask.vni) {
fs->val.encap_vld = 1;
fs->mask.encap_vld = 1;
}
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key, *mask;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
u16 vlan_tci, vlan_tci_mask;
- key = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- cls->key);
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- cls->mask);
- vlan_tci = key->vlan_id | (key->vlan_priority <<
- VLAN_PRIO_SHIFT);
- vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
- VLAN_PRIO_SHIFT);
+ flow_rule_match_vlan(rule, &match);
+ vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT);
+ vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
+ VLAN_PRIO_SHIFT);
fs->val.ivlan = vlan_tci;
fs->mask.ivlan = vlan_tci_mask;
@@ -255,10 +224,12 @@ static void cxgb4_process_flow_match(struct net_device *dev,
static int cxgb4_validate_flow_match(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 ethtype_mask = 0;
u16 ethtype_key = 0;
- if (cls->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
@@ -268,36 +239,29 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(dev, "Unsupported key used: 0x%x\n",
- cls->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- cls->mask);
- ethtype_key = ntohs(key->n_proto);
- ethtype_mask = ntohs(mask->n_proto);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ ethtype_key = ntohs(match.key->n_proto);
+ ethtype_mask = ntohs(match.mask->n_proto);
}
- if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
u16 eth_ip_type = ethtype_key & ethtype_mask;
- struct flow_dissector_key_ip *mask;
+ struct flow_match_ip match;
if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
netdev_err(dev, "IP Key supported only with IPv4/v6");
return -EINVAL;
}
- mask = skb_flow_dissector_target(cls->dissector,
- FLOW_DISSECTOR_KEY_IP,
- cls->mask);
- if (mask->ttl) {
+ flow_rule_match_ip(rule, &match);
+ if (match.mask->ttl) {
netdev_warn(dev, "ttl match unsupported for offload");
return -EOPNOTSUPP;
}
@@ -328,7 +292,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
u32 mask, u32 offset, u8 htype)
{
switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
switch (offset) {
case PEDIT_ETH_DMAC_31_0:
fs->newdmac = 1;
@@ -346,7 +310,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
offload_pedit(fs, val, mask, ETH_SMAC_47_16);
}
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
offload_pedit(fs, val, mask, IP4_SRC);
@@ -356,7 +320,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
offload_pedit(fs, val, mask, IP6_SRC_31_0);
@@ -384,7 +348,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -397,7 +361,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
}
fs->nat_mode = NAT_MODE_ALL;
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -416,56 +380,63 @@ static void cxgb4_process_flow_actions(struct net_device *in,
struct tc_cls_flower_offload *cls,
struct ch_filter_specification *fs)
{
- const struct tc_action *a;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_action_entry *act;
int i;
- tcf_exts_for_each_action(i, a, cls->exts) {
- if (is_tcf_gact_ok(a)) {
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
fs->action = FILTER_PASS;
- } else if (is_tcf_gact_shot(a)) {
+ break;
+ case FLOW_ACTION_DROP:
fs->action = FILTER_DROP;
- } else if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *out = tcf_mirred_dev(a);
+ break;
+ case FLOW_ACTION_REDIRECT: {
+ struct net_device *out = act->dev;
struct port_info *pi = netdev_priv(out);
fs->action = FILTER_SWITCH;
fs->eport = pi->port_id;
- } else if (is_tcf_vlan(a)) {
- u32 vlan_action = tcf_vlan_action(a);
- u8 prio = tcf_vlan_push_prio(a);
- u16 vid = tcf_vlan_push_vid(a);
+ }
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE: {
+ u8 prio = act->vlan.prio;
+ u16 vid = act->vlan.vid;
u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;
-
- switch (vlan_action) {
- case TCA_VLAN_ACT_POP:
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
fs->newvlan |= VLAN_REMOVE;
break;
- case TCA_VLAN_ACT_PUSH:
+ case FLOW_ACTION_VLAN_PUSH:
fs->newvlan |= VLAN_INSERT;
fs->vlan = vlan_tci;
break;
- case TCA_VLAN_ACT_MODIFY:
+ case FLOW_ACTION_VLAN_MANGLE:
fs->newvlan |= VLAN_REWRITE;
fs->vlan = vlan_tci;
break;
default:
break;
}
- } else if (is_tcf_pedit(a)) {
+ }
+ break;
+ case FLOW_ACTION_MANGLE: {
u32 mask, val, offset;
- int nkeys, i;
u8 htype;
- nkeys = tcf_pedit_nkeys(a);
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
- mask = tcf_pedit_mask(a, i);
- val = tcf_pedit_val(a, i);
- offset = tcf_pedit_offset(a, i);
+ htype = act->mangle.htype;
+ mask = act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
- process_pedit_field(fs, val, mask, offset,
- htype);
+ process_pedit_field(fs, val, mask, offset, htype);
}
+ break;
+ default:
+ break;
}
}
}
@@ -484,101 +455,89 @@ static bool valid_l4_mask(u32 mask)
}
static bool valid_pedit_action(struct net_device *dev,
- const struct tc_action *a)
+ const struct flow_action_entry *act)
{
u32 mask, offset;
- u8 cmd, htype;
- int nkeys, i;
-
- nkeys = tcf_pedit_nkeys(a);
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
- cmd = tcf_pedit_cmd(a, i);
- mask = tcf_pedit_mask(a, i);
- offset = tcf_pedit_offset(a, i);
-
- if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
- netdev_err(dev, "%s: Unsupported pedit cmd\n",
+ u8 htype;
+
+ htype = act->mangle.htype;
+ mask = act->mangle.mask;
+ offset = act->mangle.offset;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ switch (offset) {
+ case PEDIT_ETH_DMAC_31_0:
+ case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
+ case PEDIT_ETH_SMAC_47_16:
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
-
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- switch (offset) {
- case PEDIT_ETH_DMAC_31_0:
- case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
- case PEDIT_ETH_SMAC_47_16:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- switch (offset) {
- case PEDIT_IP4_SRC:
- case PEDIT_IP4_DST:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (offset) {
+ case PEDIT_IP4_SRC:
+ case PEDIT_IP4_DST:
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- switch (offset) {
- case PEDIT_IP6_SRC_31_0:
- case PEDIT_IP6_SRC_63_32:
- case PEDIT_IP6_SRC_95_64:
- case PEDIT_IP6_SRC_127_96:
- case PEDIT_IP6_DST_31_0:
- case PEDIT_IP6_DST_63_32:
- case PEDIT_IP6_DST_95_64:
- case PEDIT_IP6_DST_127_96:
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
- return false;
- }
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (offset) {
+ case PEDIT_IP6_SRC_31_0:
+ case PEDIT_IP6_SRC_63_32:
+ case PEDIT_IP6_SRC_95_64:
+ case PEDIT_IP6_SRC_127_96:
+ case PEDIT_IP6_DST_31_0:
+ case PEDIT_IP6_DST_63_32:
+ case PEDIT_IP6_DST_95_64:
+ case PEDIT_IP6_DST_127_96:
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- switch (offset) {
- case PEDIT_TCP_SPORT_DPORT:
- if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
- __func__);
- return false;
- }
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ switch (offset) {
+ case PEDIT_TCP_SPORT_DPORT:
+ if (!valid_l4_mask(~mask)) {
+ netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
__func__);
return false;
}
break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- switch (offset) {
- case PEDIT_UDP_SPORT_DPORT:
- if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
- __func__);
- return false;
- }
- break;
- default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ switch (offset) {
+ case PEDIT_UDP_SPORT_DPORT:
+ if (!valid_l4_mask(~mask)) {
+ netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
__func__);
return false;
}
break;
default:
- netdev_err(dev, "%s: Unsupported pedit type\n",
+ netdev_err(dev, "%s: Unsupported pedit field\n",
__func__);
return false;
}
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
+ return false;
}
return true;
}
@@ -586,24 +545,26 @@ static bool valid_pedit_action(struct net_device *dev,
static int cxgb4_validate_flow_actions(struct net_device *dev,
struct tc_cls_flower_offload *cls)
{
- const struct tc_action *a;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls);
+ struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
int i;
- tcf_exts_for_each_action(i, a, cls->exts) {
- if (is_tcf_gact_ok(a)) {
- /* Do nothing */
- } else if (is_tcf_gact_shot(a)) {
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ case FLOW_ACTION_DROP:
/* Do nothing */
- } else if (is_tcf_mirred_egress_redirect(a)) {
+ break;
+ case FLOW_ACTION_REDIRECT: {
struct adapter *adap = netdev2adap(dev);
struct net_device *n_dev, *target_dev;
unsigned int i;
bool found = false;
- target_dev = tcf_mirred_dev(a);
+ target_dev = act->dev;
for_each_port(adap, i) {
n_dev = adap->port[i];
if (target_dev == n_dev) {
@@ -621,15 +582,18 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
return -EINVAL;
}
act_redir = true;
- } else if (is_tcf_vlan(a)) {
- u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
- u32 vlan_action = tcf_vlan_action(a);
+ }
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE: {
+ u16 proto = be16_to_cpu(act->vlan.proto);
- switch (vlan_action) {
- case TCA_VLAN_ACT_POP:
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
break;
- case TCA_VLAN_ACT_PUSH:
- case TCA_VLAN_ACT_MODIFY:
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE:
if (proto != ETH_P_8021Q) {
netdev_err(dev, "%s: Unsupported vlan proto\n",
__func__);
@@ -642,13 +606,17 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
return -EOPNOTSUPP;
}
act_vlan = true;
- } else if (is_tcf_pedit(a)) {
- bool pedit_valid = valid_pedit_action(dev, a);
+ }
+ break;
+ case FLOW_ACTION_MANGLE: {
+ bool pedit_valid = valid_pedit_action(dev, act);
if (!pedit_valid)
return -EOPNOTSUPP;
act_pedit = true;
- } else {
+ }
+ break;
+ default:
netdev_err(dev, "%s: Unsupported action\n", __func__);
return -EOPNOTSUPP;
}
@@ -843,9 +811,9 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
if (ofld_stats->packet_count != packets) {
if (ofld_stats->prev_packet_count != packets)
ofld_stats->last_used = jiffies;
- tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
- packets - ofld_stats->packet_count,
- ofld_stats->last_used);
+ flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
+ packets - ofld_stats->packet_count,
+ ofld_stats->last_used);
ofld_stats->packet_count = packets;
ofld_stats->byte_count = bytes;
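The action side of cxgb4_tc_flower.c above stops walking tcf_exts with is_tcf_*() predicates and instead iterates the flow_action entries by ID. A stripped-down sketch of that dispatch (flow_action_for_each() and the FLOW_ACTION_* IDs are the in-tree interface; the handler and its policy are illustrative):

#include <linux/errno.h>
#include <net/flow_offload.h>

static int example_parse_actions(const struct flow_action *action)
{
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			break;			/* terminal verdicts */
		case FLOW_ACTION_REDIRECT:
			/* act->dev is the egress netdevice */
			break;
		case FLOW_ACTION_VLAN_PUSH:
			/* act->vlan.{proto,vid,prio} describe the tag */
			break;
		case FLOW_ACTION_MANGLE:
			/* act->mangle.{htype,offset,mask,val} */
			break;
		default:
			return -EOPNOTSUPP;	/* unsupported action */
		}
	}
	return 0;
}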
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index bf7325f6d553..0c5373462ced 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -218,6 +218,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index a844296135b4..9125ddd89dd1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x14
-#define T4FW_VERSION_MICRO 0x08
+#define T4FW_VERSION_MINOR 0x16
+#define T4FW_VERSION_MICRO 0x09
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x14
-#define T5FW_VERSION_MICRO 0x08
+#define T5FW_VERSION_MINOR 0x16
+#define T5FW_VERSION_MICRO 0x09
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x14
-#define T6FW_VERSION_MICRO 0x08
+#define T6FW_VERSION_MINOR 0x16
+#define T6FW_VERSION_MICRO 0x09
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 04925c731f0b..87777b09f5e0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -86,16 +86,16 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
- skb_free_frag(sg_vaddr);
+ free_pages((unsigned long)sg_vaddr, 0);
if (dpaa2_sg_is_final(&sgt[i]))
break;
}
free_buf:
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
}
/* Build a linear skb based on a single-buffer frame descriptor */
@@ -109,7 +109,7 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
ch->buf_count--;
- skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
if (unlikely(!skb))
return NULL;
@@ -144,19 +144,19 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Get the address and length from the S/G entry */
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
- dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
if (i == 0) {
/* We build the skb around the first data buffer */
- skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
if (unlikely(!skb)) {
/* Free the first SG entry now, since we already
* unmapped it and obtained the virtual address
*/
- skb_free_frag(sg_vaddr);
+ free_pages((unsigned long)sg_vaddr, 0);
/* We still need to subtract the buffers used
* by this FD from our software counter
@@ -211,9 +211,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- skb_free_frag(vaddr);
+ dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vaddr, 0);
}
}
@@ -264,9 +264,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
fq = &priv->fq[queue_id];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
- priv->tx_qdid, 0,
- fq->tx_qdbin, fd);
+ err = priv->enqueue(priv, fq, fd, 0);
if (err != -EBUSY)
break;
}
@@ -378,16 +376,16 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
}
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
- skb_free_frag(vaddr);
+ free_pages((unsigned long)vaddr, 0);
percpu_extras->rx_sg_frames++;
percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
} else {
@@ -657,7 +655,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* dpaa2_eth_tx().
*/
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
- const struct dpaa2_fd *fd)
+ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr;
@@ -712,7 +710,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
skb_free_frag(skbh);
/* Move on with skb release */
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, in_napi);
}
static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
@@ -785,9 +783,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
queue_mapping = skb_get_queue_mapping(skb);
fq = &priv->fq[queue_mapping];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
- priv->tx_qdid, 0,
- fq->tx_qdbin, &fd);
+ err = priv->enqueue(priv, fq, &fd, 0);
if (err != -EBUSY)
break;
}
@@ -795,7 +791,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- free_tx_fd(priv, &fd);
+ free_tx_fd(priv, &fd, false);
} else {
fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
@@ -837,7 +833,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- free_tx_fd(priv, fd);
+ free_tx_fd(priv, fd, true);
if (likely(!fd_errors))
return;
@@ -903,7 +899,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
- void *buf;
+ struct page *page;
dma_addr_t addr;
int i, err;
@@ -911,14 +907,16 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* Allocate buffer visible to WRIOP + skb shared info +
* alignment padding
*/
- buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
- if (unlikely(!buf))
+ /* allocate one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page)
goto err_alloc;
- buf = PTR_ALIGN(buf, priv->rx_buf_align);
-
- addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
- DMA_BIDIRECTIONAL);
+ addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@@ -926,7 +924,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
- buf, dpaa2_eth_buf_raw_size(priv),
+ page, DPAA2_ETH_RX_BUF_RAW_SIZE,
addr, DPAA2_ETH_RX_BUF_SIZE,
bpid);
}
@@ -948,7 +946,7 @@ release_bufs:
return i;
err_map:
- skb_free_frag(buf);
+ __free_pages(page, 0);
err_alloc:
/* If we managed to allocate at least some buffers,
* release them to hardware
@@ -2134,6 +2132,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_buffer_layout buf_layout = {0};
+ u16 rx_buf_align;
int err;
/* We need to check for WRIOP version 1.0.0, but depending on the MC
@@ -2142,9 +2141,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
*/
if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
- priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
else
- priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+ rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
@@ -2184,7 +2183,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
/* rx buffer */
buf_layout.pass_frame_status = true;
buf_layout.pass_parser_result = true;
- buf_layout.data_align = priv->rx_buf_align;
+ buf_layout.data_align = rx_buf_align;
buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
buf_layout.private_data_size = 0;
buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
@@ -2202,6 +2201,36 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
return 0;
}
+#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
+#define DPNI_ENQUEUE_FQID_VER_MINOR 9
+
+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd, u8 prio)
+{
+ return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, prio,
+ fq->tx_qdbin, fd);
+}
+
+static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd,
+ u8 prio __always_unused)
+{
+ return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
+ fq->tx_fqid, fd);
+}
+
+static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+{
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+ DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+ priv->enqueue = dpaa2_eth_enqueue_qd;
+ else
+ priv->enqueue = dpaa2_eth_enqueue_fq;
+}
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -2255,6 +2284,8 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
if (err)
goto close;
+ set_enqueue_mode(priv);
+
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules)
@@ -2339,6 +2370,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
}
fq->tx_qdbin = qid.qdbin;
+ fq->tx_fqid = qid.fqid;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 31fe486ec25f..9510928b7cca 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -53,7 +53,8 @@
*/
#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+#define DPAA2_ETH_REFILL_THRESH \
+ (DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
/* Maximum number of buffers that can be acquired/released through a single
* QBMan command
@@ -63,9 +64,11 @@
/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_ETH_TX_BUF_ALIGN 64
-#define DPAA2_ETH_RX_BUF_SIZE 2048
-#define DPAA2_ETH_SKB_SIZE \
- (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE
+#define DPAA2_ETH_RX_BUF_TAILROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_ETH_RX_BUF_SIZE \
+ (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
/* Hardware annotation area in RX/TX buffers */
#define DPAA2_ETH_RX_HWA_SIZE 64
@@ -274,6 +277,7 @@ struct dpaa2_eth_priv;
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
+ u32 tx_fqid;
u16 flowid;
int target_cpu;
u32 dq_frames;
@@ -326,6 +330,9 @@ struct dpaa2_eth_priv {
u8 num_fqs;
struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+ int (*enqueue)(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ struct dpaa2_fd *fd, u8 prio);
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
@@ -343,7 +350,6 @@ struct dpaa2_eth_priv {
bool rx_tstamp; /* Rx timestamping enabled */
u16 tx_qdid;
- u16 rx_buf_align;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
* This is the cpu set on which Rx and Tx conf frames are processed
@@ -418,15 +424,6 @@ enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_CLS
};
-/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
- * the buffer also needs space for its shared info struct, and we need
- * to allocate enough to accommodate hardware alignment restrictions
- */
-static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
-{
- return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
-}
-
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
@@ -451,8 +448,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
*/
static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
{
- return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
- DPAA2_ETH_RX_HWA_SIZE;
+ return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
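After the dpaa2-eth changes above, every Rx buffer is a full page: the hardware sees PAGE_SIZE minus the skb_shared_info tailroom, and the skb is later built around the same page. A minimal sketch of that allocate/map/build sequence outside the driver (the macros are renamed here to avoid clashing with the DPAA2_ETH_* ones defined in the hunk above; error paths are trimmed):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

#define EXAMPLE_RX_BUF_RAW_SIZE	PAGE_SIZE
#define EXAMPLE_RX_BUF_TAILROOM	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
#define EXAMPLE_RX_BUF_SIZE	(EXAMPLE_RX_BUF_RAW_SIZE - EXAMPLE_RX_BUF_TAILROOM)

static struct sk_buff *example_rx_buffer(struct device *dev)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_pages(0);		/* one page per Rx buffer */
	if (!page)
		return NULL;

	/* hardware only ever sees EXAMPLE_RX_BUF_SIZE bytes of it */
	addr = dma_map_page(dev, page, 0, EXAMPLE_RX_BUF_SIZE,
			    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr)) {
		__free_pages(page, 0);
		return NULL;
	}

	/* ... hand addr to hardware, receive a frame, then unmap ... */
	dma_unmap_page(dev, addr, EXAMPLE_RX_BUF_SIZE, DMA_BIDIRECTIONAL);

	/* build_skb() reuses the page; the tailroom holds skb_shared_info */
	return build_skb(page_address(page), EXAMPLE_RX_BUF_RAW_SIZE);
}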
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 257bd59bc9c6..f86d55657959 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -696,11 +696,16 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ret_val =
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&kum_reg_data);
- if (ret_val)
- return ret_val;
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ e_dbg("Error disabling far-end loopback\n");
+ } else {
+ e_dbg("Error disabling far-end loopback\n");
+ }
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val)
@@ -754,11 +759,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
return ret_val;
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
- kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
- e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (!ret_val) {
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+ if (ret_val)
+ e_dbg("Error disabling far-end loopback\n");
+ } else {
+ e_dbg("Error disabling far-end loopback\n");
+ }
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 189f231075c2..736fa51878f8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5309,8 +5309,13 @@ static void e1000_watchdog_task(struct work_struct *work)
/* 8000ES2LAN requires a Rx packet buffer work-around
* on link down event; reset the controller to flush
* the Rx packet buffer.
+ *
+ * If the link is lost the controller stops DMA, but
+ * if there is queued Tx work it cannot be done. So
+ * reset the controller to flush the Tx packet buffers.
*/
- if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+ e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
@@ -5333,14 +5338,6 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
- /* If the link is lost the controller stops DMA, but
- * if there is queued Tx work it cannot be done. So
- * reset the controller to flush the Tx packet buffers.
- */
- if (!netif_carrier_ok(netdev) &&
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
- adapter->flags |= FLAG_RESTART_NOW;
-
/* If reset is necessary, do it outside of interrupt context. */
if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
@@ -7351,6 +7348,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e1000_print_device_info(adapter);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 8f0a99b6a537..cb4d02629b86 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1148,7 +1148,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
* @results: Pointer array to message, results[0] is pointer to message
* @mbx: Pointer to mailbox information structure
*
- * This function is a default handler for MSI-X requests from the VF. The
+ * This function is a default handler for MSI-X requests from the VF. The
* assumption is that in this case it is acceptable to just directly
* hand off the message from the VF to the underlying shared code.
**/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5c6731b97059..44856a84738d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7169,11 +7169,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
struct tc_cls_flower_offload *f,
struct i40e_cloud_filter *filter)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
struct i40e_pf *pf = vsi->back;
u8 field_flags = 0;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -7183,143 +7185,109 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
- f->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
- struct flow_dissector_key_keyid *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
-
- if (mask->keyid != 0)
+ flow_rule_match_enc_keyid(rule, &match);
+ if (match.mask->keyid != 0)
field_flags |= I40E_CLOUD_FIELD_TEN_ID;
- filter->tenant_id = be32_to_cpu(key->keyid);
+ filter->tenant_id = be32_to_cpu(match.key->keyid);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
-
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- n_proto_key = ntohs(key->n_proto);
- n_proto_mask = ntohs(mask->n_proto);
+ flow_rule_match_basic(rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
if (n_proto_key == ETH_P_ALL) {
n_proto_key = 0;
n_proto_mask = 0;
}
filter->n_proto = n_proto_key & n_proto_mask;
- filter->ip_proto = key->ip_proto;
+ filter->ip_proto = match.key->ip_proto;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
- struct flow_dissector_key_eth_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
+ flow_rule_match_eth_addrs(rule, &match);
/* use is_broadcast and is_zero to check for all 0xf or 0 */
- if (!is_zero_ether_addr(mask->dst)) {
- if (is_broadcast_ether_addr(mask->dst)) {
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (is_broadcast_ether_addr(match.mask->dst)) {
field_flags |= I40E_CLOUD_FIELD_OMAC;
} else {
dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
- mask->dst);
+ match.mask->dst);
return I40E_ERR_CONFIG;
}
}
- if (!is_zero_ether_addr(mask->src)) {
- if (is_broadcast_ether_addr(mask->src)) {
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (is_broadcast_ether_addr(match.mask->src)) {
field_flags |= I40E_CLOUD_FIELD_IMAC;
} else {
dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
- mask->src);
+ match.mask->src);
return I40E_ERR_CONFIG;
}
}
- ether_addr_copy(filter->dst_mac, key->dst);
- ether_addr_copy(filter->src_mac, key->src);
+ ether_addr_copy(filter->dst_mac, match.key->dst);
+ ether_addr_copy(filter->src_mac, match.key->src);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
- if (mask->vlan_id) {
- if (mask->vlan_id == VLAN_VID_MASK) {
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
field_flags |= I40E_CLOUD_FIELD_IVLAN;
} else {
dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
- mask->vlan_id);
+ match.mask->vlan_id);
return I40E_ERR_CONFIG;
}
}
- filter->vlan_id = cpu_to_be16(key->vlan_id);
+ filter->vlan_id = cpu_to_be16(match.key->vlan_id);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
-
- if (mask->dst) {
- if (mask->dst == cpu_to_be32(0xffffffff)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
- &mask->dst);
+ &match.mask->dst);
return I40E_ERR_CONFIG;
}
}
- if (mask->src) {
- if (mask->src == cpu_to_be32(0xffffffff)) {
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be32(0xffffffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
- &mask->src);
+ &match.mask->src);
return I40E_ERR_CONFIG;
}
}
@@ -7328,70 +7296,60 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
return I40E_ERR_CONFIG;
}
- filter->dst_ipv4 = key->dst;
- filter->src_ipv4 = key->src;
+ filter->dst_ipv4 = match.key->dst;
+ filter->src_ipv4 = match.key->src;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
/* src and dest IPV6 address should not be LOOPBACK
* (0:0:0:0:0:0:0:1), which can be represented as ::1
*/
- if (ipv6_addr_loopback(&key->dst) ||
- ipv6_addr_loopback(&key->src)) {
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
dev_err(&pf->pdev->dev,
"Bad ipv6, addr is LOOPBACK\n");
return I40E_ERR_CONFIG;
}
- if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+ if (!ipv6_addr_any(&match.mask->dst) ||
+ !ipv6_addr_any(&match.mask->src))
field_flags |= I40E_CLOUD_FIELD_IIP;
- memcpy(&filter->src_ipv6, &key->src.s6_addr32,
+ memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
sizeof(filter->src_ipv6));
- memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
+ memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
sizeof(filter->dst_ipv6));
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
- if (mask->src) {
- if (mask->src == cpu_to_be16(0xffff)) {
+ flow_rule_match_ports(rule, &match);
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be16(0xffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
- be16_to_cpu(mask->src));
+ be16_to_cpu(match.mask->src));
return I40E_ERR_CONFIG;
}
}
- if (mask->dst) {
- if (mask->dst == cpu_to_be16(0xffff)) {
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be16(0xffff)) {
field_flags |= I40E_CLOUD_FIELD_IIP;
} else {
dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
- be16_to_cpu(mask->dst));
+ be16_to_cpu(match.mask->dst));
return I40E_ERR_CONFIG;
}
}
- filter->dst_port = key->dst;
- filter->src_port = key->src;
+ filter->dst_port = match.key->dst;
+ filter->src_port = match.key->src;
switch (filter->ip_proto) {
case IPPROTO_TCP:
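i40e above (and iavf below) now reach the dissector only through rule->match.dissector, and only to validate used_keys before extracting individual matches. A compact, hypothetical version of that gate, with an arbitrary set of supported keys:

#include <linux/errno.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int example_check_used_keys(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;

	/* reject rules matching on keys the hardware cannot offload */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS)))
		return -EOPNOTSUPP;

	return 0;
}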
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 9f2b7b7adf6b..4569d69a2b55 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2439,6 +2439,8 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
struct tc_cls_flower_offload *f,
struct iavf_cloud_filter *filter)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0;
u16 n_proto_key = 0;
u8 field_flags = 0;
@@ -2447,7 +2449,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
int i = 0;
struct virtchnl_filter *vf = &filter->f;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -2457,32 +2459,24 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
- f->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
- if (mask->keyid != 0)
+ flow_rule_match_enc_keyid(rule, &match);
+ if (match.mask->keyid != 0)
field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- n_proto_key = ntohs(key->n_proto);
- n_proto_mask = ntohs(mask->n_proto);
+ flow_rule_match_basic(rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
if (n_proto_key == ETH_P_ALL) {
n_proto_key = 0;
@@ -2496,122 +2490,103 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
}
- if (key->ip_proto != IPPROTO_TCP) {
+ if (match.key->ip_proto != IPPROTO_TCP) {
dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
return -EINVAL;
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
- struct flow_dissector_key_eth_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
/* use is_broadcast and is_zero to check for all 0xf or 0 */
- if (!is_zero_ether_addr(mask->dst)) {
- if (is_broadcast_ether_addr(mask->dst)) {
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (is_broadcast_ether_addr(match.mask->dst)) {
field_flags |= IAVF_CLOUD_FIELD_OMAC;
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
- mask->dst);
+ match.mask->dst);
return I40E_ERR_CONFIG;
}
}
- if (!is_zero_ether_addr(mask->src)) {
- if (is_broadcast_ether_addr(mask->src)) {
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (is_broadcast_ether_addr(match.mask->src)) {
field_flags |= IAVF_CLOUD_FIELD_IMAC;
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
- mask->src);
+ match.mask->src);
return I40E_ERR_CONFIG;
}
}
- if (!is_zero_ether_addr(key->dst))
- if (is_valid_ether_addr(key->dst) ||
- is_multicast_ether_addr(key->dst)) {
+ if (!is_zero_ether_addr(match.key->dst))
+ if (is_valid_ether_addr(match.key->dst) ||
+ is_multicast_ether_addr(match.key->dst)) {
/* set the mask if a valid dst_mac address */
for (i = 0; i < ETH_ALEN; i++)
vf->mask.tcp_spec.dst_mac[i] |= 0xff;
ether_addr_copy(vf->data.tcp_spec.dst_mac,
- key->dst);
+ match.key->dst);
}
- if (!is_zero_ether_addr(key->src))
- if (is_valid_ether_addr(key->src) ||
- is_multicast_ether_addr(key->src)) {
+ if (!is_zero_ether_addr(match.key->src))
+ if (is_valid_ether_addr(match.key->src) ||
+ is_multicast_ether_addr(match.key->src)) {
/* set the mask if a valid src_mac address */
for (i = 0; i < ETH_ALEN; i++)
vf->mask.tcp_spec.src_mac[i] |= 0xff;
ether_addr_copy(vf->data.tcp_spec.src_mac,
- key->src);
+ match.key->src);
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
- if (mask->vlan_id) {
- if (mask->vlan_id == VLAN_VID_MASK) {
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
field_flags |= IAVF_CLOUD_FIELD_IVLAN;
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
- mask->vlan_id);
+ match.mask->vlan_id);
return I40E_ERR_CONFIG;
}
}
vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
- vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
+ vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
-
- if (mask->dst) {
- if (mask->dst == cpu_to_be32(0xffffffff)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be32(0xffffffff)) {
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
- be32_to_cpu(mask->dst));
+ be32_to_cpu(match.mask->dst));
return I40E_ERR_CONFIG;
}
}
- if (mask->src) {
- if (mask->src == cpu_to_be32(0xffffffff)) {
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be32(0xffffffff)) {
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
- be32_to_cpu(mask->dst));
+ be32_to_cpu(match.mask->src));
return I40E_ERR_CONFIG;
}
}
@@ -2620,28 +2595,23 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
return I40E_ERR_CONFIG;
}
- if (key->dst) {
+ if (match.key->dst) {
vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
- vf->data.tcp_spec.dst_ip[0] = key->dst;
+ vf->data.tcp_spec.dst_ip[0] = match.key->dst;
}
- if (key->src) {
+ if (match.key->src) {
vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
- vf->data.tcp_spec.src_ip[0] = key->src;
+ vf->data.tcp_spec.src_ip[0] = match.key->src;
}
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
/* validate mask, make sure it is not IPV6_ADDR_ANY */
- if (ipv6_addr_any(&mask->dst)) {
+ if (ipv6_addr_any(&match.mask->dst)) {
dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
IPV6_ADDR_ANY);
return I40E_ERR_CONFIG;
@@ -2650,61 +2620,56 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
/* src and dest IPv6 address should not be LOOPBACK
* (0:0:0:0:0:0:0:1) which can be represented as ::1
*/
- if (ipv6_addr_loopback(&key->dst) ||
- ipv6_addr_loopback(&key->src)) {
+ if (ipv6_addr_loopback(&match.key->dst) ||
+ ipv6_addr_loopback(&match.key->src)) {
dev_err(&adapter->pdev->dev,
"ipv6 addr should not be loopback\n");
return I40E_ERR_CONFIG;
}
- if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+ if (!ipv6_addr_any(&match.mask->dst) ||
+ !ipv6_addr_any(&match.mask->src))
field_flags |= IAVF_CLOUD_FIELD_IIP;
for (i = 0; i < 4; i++)
vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
- memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
+ memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
sizeof(vf->data.tcp_spec.dst_ip));
for (i = 0; i < 4; i++)
vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
- memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
+ memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
sizeof(vf->data.tcp_spec.src_ip));
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
-
- if (mask->src) {
- if (mask->src == cpu_to_be16(0xffff)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if (match.mask->src) {
+ if (match.mask->src == cpu_to_be16(0xffff)) {
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
- be16_to_cpu(mask->src));
+ be16_to_cpu(match.mask->src));
return I40E_ERR_CONFIG;
}
}
- if (mask->dst) {
- if (mask->dst == cpu_to_be16(0xffff)) {
+ if (match.mask->dst) {
+ if (match.mask->dst == cpu_to_be16(0xffff)) {
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
- be16_to_cpu(mask->dst));
+ be16_to_cpu(match.mask->dst));
return I40E_ERR_CONFIG;
}
}
- if (key->dst) {
+ if (match.key->dst) {
vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
- vf->data.tcp_spec.dst_port = key->dst;
+ vf->data.tcp_spec.dst_port = match.key->dst;
}
- if (key->src) {
+ if (match.key->src) {
vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
- vf->data.tcp_spec.src_port = key->src;
+ vf->data.tcp_spec.src_port = match.key->src;
}
}
vf->field_flags = field_flags;
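Every per-field check in this parser (and in the i40e version above) applies the same policy: an all-zero mask means the field is ignored, an all-ones mask means exact match, and anything in between is rejected with I40E_ERR_CONFIG. A one-line restatement of that policy for a port mask, with an illustrative helper name that is not part of the patch:

/* Accept a port mask only if the field is ignored or matched exactly. */
static bool example_port_mask_ok(__be16 mask)
{
	return !mask || mask == cpu_to_be16(0xffff);
}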
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dfa357b1a9d6..6d812e96572d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -39,7 +39,7 @@
#include "igb.h"
#define MAJ 5
-#define MIN 4
+#define MIN 6
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
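For reference, bumping MIN from 4 to 6 makes the advertised driver version "5.6.0-k". The string is built purely from preprocessor concatenation; a self-contained sketch of the same construction (EXAMPLE_* names are illustrative):

#include <linux/stringify.h>

#define EXAMPLE_MAJ	5
#define EXAMPLE_MIN	6
#define EXAMPLE_BUILD	0

/* __stringify() expands its argument before stringizing, so the adjacent
 * string literals "5" "." "6" "." "0" "-k" concatenate into "5.6.0-k".
 */
static const char example_version[] =
	__stringify(EXAMPLE_MAJ) "." __stringify(EXAMPLE_MIN) "."
	__stringify(EXAMPLE_BUILD) "-k";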
@@ -2581,9 +2581,11 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
int traffic_class,
struct igb_nfc_filter *input)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = f->common.extack;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -2593,78 +2595,60 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key, *mask;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
-
- if (!is_zero_ether_addr(mask->dst)) {
- if (!is_broadcast_ether_addr(mask->dst)) {
+ flow_rule_match_eth_addrs(rule, &match);
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ if (!is_broadcast_ether_addr(match.mask->dst)) {
NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
return -EINVAL;
}
input->filter.match_flags |=
IGB_FILTER_FLAG_DST_MAC_ADDR;
- ether_addr_copy(input->filter.dst_addr, key->dst);
+ ether_addr_copy(input->filter.dst_addr, match.key->dst);
}
- if (!is_zero_ether_addr(mask->src)) {
- if (!is_broadcast_ether_addr(mask->src)) {
+ if (!is_zero_ether_addr(match.mask->src)) {
+ if (!is_broadcast_ether_addr(match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
return -EINVAL;
}
input->filter.match_flags |=
IGB_FILTER_FLAG_SRC_MAC_ADDR;
- ether_addr_copy(input->filter.src_addr, key->src);
+ ether_addr_copy(input->filter.src_addr, match.key->src);
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- if (mask->n_proto) {
- if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ flow_rule_match_basic(rule, &match);
+ if (match.mask->n_proto) {
+ if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
return -EINVAL;
}
input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
- input->filter.etype = key->n_proto;
+ input->filter.etype = match.key->n_proto;
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
- if (mask->vlan_priority) {
- if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority) {
+ if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
return -EINVAL;
}
input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
- input->filter.vlan_tci = key->vlan_priority;
+ input->filter.vlan_tci = match.key->vlan_priority;
}
}
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 4387f6ba8e67..88c6f88baac5 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_IGC) += igc.o
-igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o
+igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
+igc_ethtool.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index b1039dd3dd13..80faccc34cda 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -13,19 +13,43 @@
#include "igc_hw.h"
-/* main */
+/* forward declaration */
+void igc_set_ethtool_ops(struct net_device *);
+
+struct igc_adapter;
+struct igc_ring;
+
+void igc_up(struct igc_adapter *adapter);
+void igc_down(struct igc_adapter *adapter);
+int igc_setup_tx_resources(struct igc_ring *ring);
+int igc_setup_rx_resources(struct igc_ring *ring);
+void igc_free_tx_resources(struct igc_ring *ring);
+void igc_free_rx_resources(struct igc_ring *ring);
+unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
+void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues);
+int igc_reinit_queues(struct igc_adapter *adapter);
+bool igc_has_link(struct igc_adapter *adapter);
+void igc_reset(struct igc_adapter *adapter);
+int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
+
extern char igc_driver_name[];
extern char igc_driver_version[];
+#define IGC_REGS_LEN 740
+#define IGC_RETA_SIZE 128
+
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI BIT(0)
-#define IGC_FLAG_QUEUE_PAIRS BIT(4)
+#define IGC_FLAG_QUEUE_PAIRS BIT(3)
+#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
+#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
@@ -60,6 +84,7 @@ extern char igc_driver_version[];
#define IGC_RXBUFFER_2048 2048
#define IGC_RXBUFFER_3072 3072
+#define AUTO_ALL_MODES 0
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* RX and TX descriptor control thresholds.
@@ -340,6 +365,8 @@ struct igc_adapter {
struct igc_mac_addr *mac_table;
+ u8 rss_indir_tbl[IGC_RETA_SIZE];
+
unsigned long link_check_timeout;
struct igc_info ei;
};
@@ -418,6 +445,9 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
return 0;
}
+/* forward declaration */
+void igc_reinit_locked(struct igc_adapter *);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index df40af759542..51a8b8769c67 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -54,22 +54,6 @@ out:
}
/**
- * igc_check_for_link_base - Check for link
- * @hw: pointer to the HW structure
- *
- * If sgmii is enabled, then use the pcs register to determine link, otherwise
- * use the generic interface for determining link.
- */
-static s32 igc_check_for_link_base(struct igc_hw *hw)
-{
- s32 ret_val = 0;
-
- ret_val = igc_check_for_copper_link(hw);
-
- return ret_val;
-}
-
-/**
* igc_reset_hw_base - Reset hardware
* @hw: pointer to the HW structure
*
@@ -124,22 +108,6 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
}
/**
- * igc_get_phy_id_base - Retrieve PHY addr and id
- * @hw: pointer to the HW structure
- *
- * Retrieves the PHY address and ID for both PHY's which do and do not use
- * sgmi interface.
- */
-static s32 igc_get_phy_id_base(struct igc_hw *hw)
-{
- s32 ret_val = 0;
-
- ret_val = igc_get_phy_id(hw);
-
- return ret_val;
-}
-
-/**
* igc_init_nvm_params_base - Init NVM func ptrs.
* @hw: pointer to the HW structure
*/
@@ -163,6 +131,7 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
if (size > 15)
size = 15;
+ nvm->type = igc_nvm_eeprom_spi;
nvm->word_size = BIT(size);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
@@ -261,11 +230,11 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
goto out;
}
- ret_val = igc_get_phy_id_base(hw);
+ ret_val = igc_get_phy_id(hw);
if (ret_val)
return ret_val;
- igc_check_for_link_base(hw);
+ igc_check_for_copper_link(hw);
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
@@ -350,26 +319,6 @@ static void igc_release_phy_base(struct igc_hw *hw)
}
/**
- * igc_get_link_up_info_base - Get link speed/duplex info
- * @hw: pointer to the HW structure
- * @speed: stores the current speed
- * @duplex: stores the current duplex
- *
- * This is a wrapper function, if using the serial gigabit media independent
- * interface, use PCS to retrieve the link speed and duplex information.
- * Otherwise, use the generic function to get the link speed and duplex info.
- */
-static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed,
- u16 *duplex)
-{
- s32 ret_val;
-
- ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex);
-
- return ret_val;
-}
-
-/**
* igc_init_hw_base - Initialize hardware
* @hw: pointer to the HW structure
*
@@ -408,19 +357,6 @@ static s32 igc_init_hw_base(struct igc_hw *hw)
}
/**
- * igc_read_mac_addr_base - Read device MAC address
- * @hw: pointer to the HW structure
- */
-static s32 igc_read_mac_addr_base(struct igc_hw *hw)
-{
- s32 ret_val = 0;
-
- ret_val = igc_read_mac_addr(hw);
-
- return ret_val;
-}
-
-/**
* igc_power_down_phy_copper_base - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -512,10 +448,10 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
static struct igc_mac_operations igc_mac_ops_base = {
.init_hw = igc_init_hw_base,
- .check_for_link = igc_check_for_link_base,
+ .check_for_link = igc_check_for_copper_link,
.rar_set = igc_rar_set,
- .read_mac_addr = igc_read_mac_addr_base,
- .get_speed_and_duplex = igc_get_link_up_info_base,
+ .read_mac_addr = igc_read_mac_addr,
+ .get_speed_and_duplex = igc_get_speed_and_duplex_copper,
};
static const struct igc_phy_operations igc_phy_ops_base = {
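The wrappers deleted above (igc_check_for_link_base(), igc_get_phy_id_base(), igc_get_link_up_info_base(), igc_read_mac_addr_base()) were pure pass-throughs, so the generic helpers can be wired into the ops tables directly; their signatures already match the function-pointer types. A reduced sketch of the idea, with an illustrative struct name:

struct example_mac_ops {
	s32 (*check_for_link)(struct igc_hw *hw);
	s32 (*read_mac_addr)(struct igc_hw *hw);
};

static const struct example_mac_ops example_ops = {
	.check_for_link	= igc_check_for_copper_link,	/* was igc_check_for_link_base() */
	.read_mac_addr	= igc_read_mac_addr,		/* was igc_read_mac_addr_base() */
};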
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index 35588fa7b8c5..76d4991d7284 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -36,28 +36,6 @@ union igc_adv_tx_desc {
#define IGC_RAR_ENTRIES 16
-struct igc_adv_data_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- u32 data;
- struct {
- u32 datalen:16; /* Data buffer length */
- u32 rsvd:4;
- u32 dtyp:4; /* Descriptor type */
- u32 dcmd:8; /* Descriptor command */
- } config;
- } lower;
- union {
- u32 data;
- struct {
- u32 status:4; /* Descriptor status */
- u32 idx:4;
- u32 popts:6; /* Packet Options */
- u32 paylen:18; /* Payload length */
- } options;
- } upper;
-};
-
/* Receive Descriptor - Advanced */
union igc_adv_rx_desc {
struct {
@@ -90,9 +68,6 @@ union igc_adv_rx_desc {
} wb; /* writeback */
};
-/* Adv Transmit Descriptor Config Masks */
-#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
-
/* Additional Transmit Descriptor Control definitions */
#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 8740754ea1fd..7d1bdcd1225a 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -4,6 +4,10 @@
#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define REQ_RX_DESCRIPTOR_MULTIPLE 8
+
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
/* PCI Bus Info */
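REQ_TX_DESCRIPTOR_MULTIPLE and REQ_RX_DESCRIPTOR_MULTIPLE feed the ring-size clamping in igc_set_ringparam() further down: a requested count is clamped to the supported range and then rounded up to a multiple of eight descriptors, presumably the granularity the ring-length registers accept, as on earlier Intel GbE parts. A reduced sketch of that clamping (IGC_MIN_RXD/IGC_MAX_RXD come from the existing igc headers, min_t/max_t/ALIGN from <linux/kernel.h>; the helper name is illustrative):

static u16 example_clamp_rx_count(u32 requested)
{
	u16 count;

	count = min_t(u32, requested, IGC_MAX_RXD);
	count = max_t(u16, count, IGC_MIN_RXD);

	return ALIGN(count, REQ_RX_DESCRIPTOR_MULTIPLE);	/* round up to 8 */
}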
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
new file mode 100644
index 000000000000..eff37a6c0afa
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -0,0 +1,1032 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+/* ethtool support for igc */
+#include <linux/pm_runtime.h>
+
+#include "igc.h"
+
+static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)
+
+static void igc_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igc_driver_version, sizeof(drvinfo->version));
+
+ /* add fw_version here */
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN;
+}
+
+static int igc_get_regs_len(struct net_device *netdev)
+{
+ return IGC_REGS_LEN * sizeof(u32);
+}
+
+static void igc_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u8 i;
+
+ memset(p, 0, IGC_REGS_LEN * sizeof(u32));
+
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = rd32(IGC_CTRL);
+ regs_buff[1] = rd32(IGC_STATUS);
+ regs_buff[2] = rd32(IGC_CTRL_EXT);
+ regs_buff[3] = rd32(IGC_MDIC);
+ regs_buff[4] = rd32(IGC_CONNSW);
+
+ /* NVM Register */
+ regs_buff[5] = rd32(IGC_EECD);
+
+ /* Interrupt */
+ /* Reading EICS for EICR because they read the
+ * same but EICS does not clear on read
+ */
+ regs_buff[6] = rd32(IGC_EICS);
+ regs_buff[7] = rd32(IGC_EICS);
+ regs_buff[8] = rd32(IGC_EIMS);
+ regs_buff[9] = rd32(IGC_EIMC);
+ regs_buff[10] = rd32(IGC_EIAC);
+ regs_buff[11] = rd32(IGC_EIAM);
+ /* Reading ICS for ICR because they read the
+ * same but ICS does not clear on read
+ */
+ regs_buff[12] = rd32(IGC_ICS);
+ regs_buff[13] = rd32(IGC_ICS);
+ regs_buff[14] = rd32(IGC_IMS);
+ regs_buff[15] = rd32(IGC_IMC);
+ regs_buff[16] = rd32(IGC_IAC);
+ regs_buff[17] = rd32(IGC_IAM);
+
+ /* Flow Control */
+ regs_buff[18] = rd32(IGC_FCAL);
+ regs_buff[19] = rd32(IGC_FCAH);
+ regs_buff[20] = rd32(IGC_FCTTV);
+ regs_buff[21] = rd32(IGC_FCRTL);
+ regs_buff[22] = rd32(IGC_FCRTH);
+ regs_buff[23] = rd32(IGC_FCRTV);
+
+ /* Receive */
+ regs_buff[24] = rd32(IGC_RCTL);
+ regs_buff[25] = rd32(IGC_RXCSUM);
+ regs_buff[26] = rd32(IGC_RLPML);
+ regs_buff[27] = rd32(IGC_RFCTL);
+
+ /* Transmit */
+ regs_buff[28] = rd32(IGC_TCTL);
+ regs_buff[29] = rd32(IGC_TIPG);
+
+ /* Wake Up */
+
+ /* MAC */
+
+ /* Statistics */
+ regs_buff[30] = adapter->stats.crcerrs;
+ regs_buff[31] = adapter->stats.algnerrc;
+ regs_buff[32] = adapter->stats.symerrs;
+ regs_buff[33] = adapter->stats.rxerrc;
+ regs_buff[34] = adapter->stats.mpc;
+ regs_buff[35] = adapter->stats.scc;
+ regs_buff[36] = adapter->stats.ecol;
+ regs_buff[37] = adapter->stats.mcc;
+ regs_buff[38] = adapter->stats.latecol;
+ regs_buff[39] = adapter->stats.colc;
+ regs_buff[40] = adapter->stats.dc;
+ regs_buff[41] = adapter->stats.tncrs;
+ regs_buff[42] = adapter->stats.sec;
+ regs_buff[43] = adapter->stats.htdpmc;
+ regs_buff[44] = adapter->stats.rlec;
+ regs_buff[45] = adapter->stats.xonrxc;
+ regs_buff[46] = adapter->stats.xontxc;
+ regs_buff[47] = adapter->stats.xoffrxc;
+ regs_buff[48] = adapter->stats.xofftxc;
+ regs_buff[49] = adapter->stats.fcruc;
+ regs_buff[50] = adapter->stats.prc64;
+ regs_buff[51] = adapter->stats.prc127;
+ regs_buff[52] = adapter->stats.prc255;
+ regs_buff[53] = adapter->stats.prc511;
+ regs_buff[54] = adapter->stats.prc1023;
+ regs_buff[55] = adapter->stats.prc1522;
+ regs_buff[56] = adapter->stats.gprc;
+ regs_buff[57] = adapter->stats.bprc;
+ regs_buff[58] = adapter->stats.mprc;
+ regs_buff[59] = adapter->stats.gptc;
+ regs_buff[60] = adapter->stats.gorc;
+ regs_buff[61] = adapter->stats.gotc;
+ regs_buff[62] = adapter->stats.rnbc;
+ regs_buff[63] = adapter->stats.ruc;
+ regs_buff[64] = adapter->stats.rfc;
+ regs_buff[65] = adapter->stats.roc;
+ regs_buff[66] = adapter->stats.rjc;
+ regs_buff[67] = adapter->stats.mgprc;
+ regs_buff[68] = adapter->stats.mgpdc;
+ regs_buff[69] = adapter->stats.mgptc;
+ regs_buff[70] = adapter->stats.tor;
+ regs_buff[71] = adapter->stats.tot;
+ regs_buff[72] = adapter->stats.tpr;
+ regs_buff[73] = adapter->stats.tpt;
+ regs_buff[74] = adapter->stats.ptc64;
+ regs_buff[75] = adapter->stats.ptc127;
+ regs_buff[76] = adapter->stats.ptc255;
+ regs_buff[77] = adapter->stats.ptc511;
+ regs_buff[78] = adapter->stats.ptc1023;
+ regs_buff[79] = adapter->stats.ptc1522;
+ regs_buff[80] = adapter->stats.mptc;
+ regs_buff[81] = adapter->stats.bptc;
+ regs_buff[82] = adapter->stats.tsctc;
+ regs_buff[83] = adapter->stats.iac;
+ regs_buff[84] = adapter->stats.rpthc;
+ regs_buff[85] = adapter->stats.hgptc;
+ regs_buff[86] = adapter->stats.hgorc;
+ regs_buff[87] = adapter->stats.hgotc;
+ regs_buff[88] = adapter->stats.lenerrs;
+ regs_buff[89] = adapter->stats.scvpc;
+ regs_buff[90] = adapter->stats.hrmpc;
+
+ for (i = 0; i < 4; i++)
+ regs_buff[91 + i] = rd32(IGC_SRRCTL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[95 + i] = rd32(IGC_PSRTYPE(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[99 + i] = rd32(IGC_RDBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[103 + i] = rd32(IGC_RDBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[107 + i] = rd32(IGC_RDLEN(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[111 + i] = rd32(IGC_RDH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[115 + i] = rd32(IGC_RDT(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[119 + i] = rd32(IGC_RXDCTL(i));
+
+ for (i = 0; i < 10; i++)
+ regs_buff[123 + i] = rd32(IGC_EITR(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[139 + i] = rd32(IGC_RAL(i));
+ for (i = 0; i < 16; i++)
+ regs_buff[145 + i] = rd32(IGC_RAH(i));
+
+ for (i = 0; i < 4; i++)
+ regs_buff[149 + i] = rd32(IGC_TDBAL(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[152 + i] = rd32(IGC_TDBAH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[156 + i] = rd32(IGC_TDLEN(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[160 + i] = rd32(IGC_TDH(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[164 + i] = rd32(IGC_TDT(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
+}
+
+static u32 igc_get_msglevel(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void igc_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static int igc_nway_reset(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ igc_reinit_locked(adapter);
+ return 0;
+}
+
+static u32 igc_get_link(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_mac_info *mac = &adapter->hw.mac;
+
+ /* If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ mac->get_link_status = 1;
+
+ return igc_has_link(adapter);
+}
+
+static int igc_get_eeprom_len(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->hw.nvm.word_size * 2;
+}
+
+static int igc_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ int first_word, last_word;
+ u16 *eeprom_buff;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ if (hw->nvm.type == igc_nvm_eeprom_spi) {
+ ret_val = hw->nvm.ops.read(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+ } else {
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
+ &eeprom_buff[i]);
+ if (ret_val)
+ break;
+ }
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+ eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int igc_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 *eeprom_buff;
+ void *ptr;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (hw->mac.type >= igc_i225 &&
+ !igc_get_flash_presence_i225(hw)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EFAULT;
+
+ max_len = hw->nvm.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
+ ret_val = hw->nvm.ops.read(hw, first_word, 1,
+ &eeprom_buff[0]);
+ ptr++;
+ }
+ if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) {
+ /* need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
+ ret_val = hw->nvm.ops.read(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ret_val = hw->nvm.ops.write(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
+
+ /* Update the checksum if nvm write succeeded */
+ if (ret_val == 0)
+ hw->nvm.ops.update(hw);
+
+ /* TODO: decide whether igc_set_fw_version(adapter) is needed here */
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
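igc_set_eeprom() above operates on 16-bit words, so a byte-granular request that starts or ends mid-word has to read back the boundary words before rewriting them. A worked example of the index math, mirroring the expressions used in the function (helper name is illustrative):

#include <linux/kernel.h>

static void example_eeprom_span(u32 offset, u32 len)	/* e.g. offset 3, len 4 */
{
	int first_word = offset >> 1;			/* 3 >> 1           = 1 */
	int last_word  = (offset + len - 1) >> 1;	/* (3 + 4 - 1) >> 1 = 3 */
	bool rmw_first = offset & 1;		/* only the second byte of word 1 changes */
	bool rmw_last  = (offset + len) & 1;	/* only the first byte of word 3 changes */

	pr_info("words %d..%d, read-modify-write first=%d last=%d\n",
		first_word, last_word, rmw_first, rmw_last);
}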
+static void igc_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = IGC_MAX_RXD;
+ ring->tx_max_pending = IGC_MAX_TXD;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
+}
+
+static int igc_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_ring *temp_ring;
+ u16 new_rx_count, new_tx_count;
+ int i, err = 0;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD);
+ new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD);
+ new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD);
+ new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD);
+ new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if (new_tx_count == adapter->tx_ring_count &&
+ new_rx_count == adapter->rx_ring_count) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->count = new_rx_count;
+ adapter->tx_ring_count = new_tx_count;
+ adapter->rx_ring_count = new_rx_count;
+ goto clear_reset;
+ }
+
+ if (adapter->num_tx_queues > adapter->num_rx_queues)
+ temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
+ adapter->num_tx_queues));
+ else
+ temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
+ adapter->num_rx_queues));
+
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ igc_down(adapter);
+
+ /* We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the Tx and Rx ring structs.
+ */
+ if (new_tx_count != adapter->tx_ring_count) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
+ sizeof(struct igc_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = igc_setup_tx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ igc_free_tx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ igc_free_tx_resources(adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct igc_ring));
+ }
+
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
+ sizeof(struct igc_ring));
+
+ temp_ring[i].count = new_rx_count;
+ err = igc_setup_rx_resources(&temp_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ igc_free_rx_resources(&temp_ring[i]);
+ }
+ goto err_setup;
+ }
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_free_rx_resources(adapter->rx_ring[i]);
+
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct igc_ring));
+ }
+
+ adapter->rx_ring_count = new_rx_count;
+ }
+err_setup:
+ igc_up(adapter);
+ vfree(temp_ring);
+clear_reset:
+ clear_bit(__IGC_RESETTING, &adapter->state);
+ return err;
+}
+
+static void igc_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+
+ pause->autoneg =
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (hw->fc.current_mode == igc_fc_rx_pause) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == igc_fc_tx_pause) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == igc_fc_full) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int igc_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ int retval = 0;
+
+ adapter->fc_autoneg = pause->autoneg;
+
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+ hw->fc.requested_mode = igc_fc_default;
+ if (netif_running(adapter->netdev)) {
+ igc_down(adapter);
+ igc_up(adapter);
+ } else {
+ igc_reset(adapter);
+ }
+ } else {
+ if (pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = igc_fc_full;
+ else if (pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = igc_fc_rx_pause;
+ else if (!pause->rx_pause && pause->tx_pause)
+ hw->fc.requested_mode = igc_fc_tx_pause;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ hw->fc.requested_mode = igc_fc_none;
+
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ retval = ((hw->phy.media_type == igc_media_type_copper) ?
+ igc_force_mac_fc(hw) : igc_setup_link(hw));
+ }
+
+ clear_bit(__IGC_RESETTING, &adapter->state);
+ return retval;
+}
+
+static int igc_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->rx_itr_setting <= 3)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
+ if (adapter->tx_itr_setting <= 3)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+ }
+
+ return 0;
+}
+
+static int igc_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_rx_coalesce ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -ENOTSUPP;
+
+ if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
+ (ec->rx_coalesce_usecs > 3 &&
+ ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
+ ec->rx_coalesce_usecs == 2)
+ return -EINVAL;
+
+ if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS ||
+ (ec->tx_coalesce_usecs > 3 &&
+ ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
+ ec->tx_coalesce_usecs == 2)
+ return -EINVAL;
+
+ if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ /* If ITR is disabled, disable DMAC */
+ if (ec->rx_coalesce_usecs == 0) {
+ if (adapter->flags & IGC_FLAG_DMAC)
+ adapter->flags &= ~IGC_FLAG_DMAC;
+ }
+
+ /* convert usecs to the Rx ITR setting; values <= 3 are kept as-is */
+ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+
+ /* convert usecs to the Tx ITR setting; values <= 3 are kept as-is */
+ if (adapter->flags & IGC_FLAG_QUEUE_PAIRS)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+ else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igc_q_vector *q_vector = adapter->q_vector[i];
+
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+ if (q_vector->rx.ring)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ else
+ q_vector->itr_val = adapter->tx_itr_setting;
+ if (q_vector->itr_val && q_vector->itr_val <= 3)
+ q_vector->itr_val = IGC_START_ITR;
+ q_vector->set_itr = 1;
+ }
+
+ return 0;
+}
+
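Both coalesce callbacks above rely on one encoding for rx/tx_itr_setting: the values 0-3 are stored verbatim and treated specially (note the explicit rejection of 2 above), while real microsecond values are stored shifted left by two and reported back shifted right by two. A small restatement of that round trip, with illustrative helper names:

static u32 example_usecs_to_itr_setting(u32 usecs)
{
	return (usecs && usecs <= 3) ? usecs : usecs << 2;
}

static u32 example_itr_setting_to_usecs(u32 setting)
{
	return (setting <= 3) ? setting : setting >> 2;
}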
+void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 reg = IGC_RETA(0);
+ u32 shift = 0;
+ int i = 0;
+
+ while (i < IGC_RETA_SIZE) {
+ u32 val = 0;
+ int j;
+
+ for (j = 3; j >= 0; j--) {
+ val <<= 8;
+ val |= adapter->rss_indir_tbl[i + j];
+ }
+
+ wr32(reg, val << shift);
+ reg += 4;
+ i += 4;
+ }
+}
+
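igc_write_rss_indir_tbl() above packs four one-byte indirection entries into each 32-bit RETA register, lowest-numbered entry in the least significant byte; the shift variable stays zero for this device and looks like a carry-over from igb, where some parts needed a shifted layout. A worked example of the packing for one register (helper name is illustrative):

static u32 example_pack_reta(const u8 *tbl)	/* tbl points at four entries */
{
	u32 val = 0;
	int j;

	for (j = 3; j >= 0; j--)
		val = (val << 8) | tbl[j];

	/* { 0, 1, 2, 3 } packs to 0x03020100: entry 0 in the low byte */
	return val;
}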
+static u32 igc_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return IGC_RETA_SIZE;
+}
+
+static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (!indir)
+ return 0;
+ for (i = 0; i < IGC_RETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+
+ return 0;
+}
+
+static int igc_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u32 num_queues;
+ int i;
+
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+
+ num_queues = adapter->rss_queues;
+
+ /* Verify user input. */
+ for (i = 0; i < IGC_RETA_SIZE; i++)
+ if (indir[i] >= num_queues)
+ return -EINVAL;
+
+ for (i = 0; i < IGC_RETA_SIZE; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+
+ igc_write_rss_indir_tbl(adapter);
+
+ return 0;
+}
+
+static unsigned int igc_max_channels(struct igc_adapter *adapter)
+{
+ return igc_get_max_rss_queues(adapter);
+}
+
+static void igc_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = igc_max_channels(adapter);
+
+ /* Report info for other vector */
+ if (adapter->flags & IGC_FLAG_HAS_MSIX) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ ch->combined_count = adapter->rss_queues;
+}
+
+static int igc_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ unsigned int count = ch->combined_count;
+ unsigned int max_combined = 0;
+
+ /* Verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* Verify other_count is valid and has not been changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* Verify the number of channels doesn't exceed hw limits */
+ max_combined = igc_max_channels(adapter);
+ if (count > max_combined)
+ return -EINVAL;
+
+ if (count != adapter->rss_queues) {
+ adapter->rss_queues = count;
+ igc_set_flag_queue_pairs(adapter, max_combined);
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match the new configuration.
+ */
+ return igc_reinit_queues(adapter);
+ }
+
+ return 0;
+}
+
+static u32 igc_get_priv_flags(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags & IGC_FLAG_RX_LEGACY)
+ priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int igc_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags = adapter->flags;
+
+ flags &= ~IGC_FLAG_RX_LEGACY;
+ if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX)
+ flags |= IGC_FLAG_RX_LEGACY;
+
+ if (flags != adapter->flags) {
+ adapter->flags = flags;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ igc_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static int igc_ethtool_begin(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ return 0;
+}
+
+static void igc_ethtool_complete(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
+static int igc_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 status;
+ u32 speed;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ /* supported link modes */
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full);
+
+ /* twisted pair */
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy.addr;
+
+ /* advertising link modes */
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+
+ /* set autoneg settings */
+ if (hw->mac.autoneg == 1) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Autoneg);
+ }
+
+ switch (hw->fc.requested_mode) {
+ case igc_fc_full:
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ break;
+ case igc_fc_rx_pause:
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+ break;
+ case igc_fc_tx_pause:
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+ break;
+ default:
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ Asym_Pause);
+ }
+
+ status = rd32(IGC_STATUS);
+
+ if (status & IGC_STATUS_LU) {
+ if (status & IGC_STATUS_SPEED_1000) {
+ /* For I225, STATUS will indicate 1G speed in both
+ * 1 Gbps and 2.5 Gbps link modes.
+ * An additional bit is used
+ * to differentiate between 1 Gbps and 2.5 Gbps.
+ */
+ if (hw->mac.type == igc_i225 &&
+ (status & IGC_STATUS_SPEED_2500)) {
+ speed = SPEED_2500;
+ hw_dbg("2500 Mbps, ");
+ } else {
+ speed = SPEED_1000;
+ hw_dbg("1000 Mbps, ");
+ }
+ } else if (status & IGC_STATUS_SPEED_100) {
+ speed = SPEED_100;
+ hw_dbg("100 Mbps, ");
+ } else {
+ speed = SPEED_10;
+ hw_dbg("10 Mbps, ");
+ }
+ if ((status & IGC_STATUS_FD) ||
+ hw->phy.media_type != igc_media_type_copper)
+ cmd->base.duplex = DUPLEX_FULL;
+ else
+ cmd->base.duplex = DUPLEX_HALF;
+ } else {
+ speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.speed = speed;
+ if (hw->mac.autoneg)
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ else
+ cmd->base.autoneg = AUTONEG_DISABLE;
+
+ /* MDI-X => 2; MDI =>1; Invalid =>0 */
+ if (hw->phy.media_type == igc_media_type_copper)
+ cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix;
+
+ return 0;
+}
+
+static int igc_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 advertising;
+
+ /* When the adapter is being reset, autoneg/speed/duplex
+ * cannot be changed
+ */
+ if (igc_check_reset_block(hw)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot change link characteristics when reset is active.\n");
+ return -EINVAL;
+ }
+
+ /* MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (cmd->base.eth_tp_mdix_ctrl) {
+ if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO &&
+ cmd->base.autoneg != AUTONEG_ENABLE) {
+ dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_advertised = advertising;
+ if (adapter->fc_autoneg)
+ hw->fc.requested_mode = igc_fc_default;
+ } else {
+ /* calling this overrides forced MDI setting */
+ dev_info(&adapter->pdev->dev,
+ "Force mode currently not supported\n");
+ }
+
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (cmd->base.eth_tp_mdix_ctrl) {
+ /* fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl;
+ }
+
+ /* reset the link */
+ if (netif_running(adapter->netdev)) {
+ igc_down(adapter);
+ igc_up(adapter);
+ } else {
+ igc_reset(adapter);
+ }
+
+ clear_bit(__IGC_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+static const struct ethtool_ops igc_ethtool_ops = {
+ .get_drvinfo = igc_get_drvinfo,
+ .get_regs_len = igc_get_regs_len,
+ .get_regs = igc_get_regs,
+ .get_msglevel = igc_get_msglevel,
+ .set_msglevel = igc_set_msglevel,
+ .nway_reset = igc_nway_reset,
+ .get_link = igc_get_link,
+ .get_eeprom_len = igc_get_eeprom_len,
+ .get_eeprom = igc_get_eeprom,
+ .set_eeprom = igc_set_eeprom,
+ .get_ringparam = igc_get_ringparam,
+ .set_ringparam = igc_set_ringparam,
+ .get_pauseparam = igc_get_pauseparam,
+ .set_pauseparam = igc_set_pauseparam,
+ .get_coalesce = igc_get_coalesce,
+ .set_coalesce = igc_set_coalesce,
+ .get_rxfh_indir_size = igc_get_rxfh_indir_size,
+ .get_rxfh = igc_get_rxfh,
+ .set_rxfh = igc_set_rxfh,
+ .get_channels = igc_get_channels,
+ .set_channels = igc_set_channels,
+ .get_priv_flags = igc_get_priv_flags,
+ .set_priv_flags = igc_set_priv_flags,
+ .begin = igc_ethtool_begin,
+ .complete = igc_ethtool_complete,
+ .get_link_ksettings = igc_get_link_ksettings,
+ .set_link_ksettings = igc_set_link_ksettings,
+};
+
+void igc_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &igc_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index c50414f48f0d..7c88b7bd4799 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -55,6 +55,7 @@ enum igc_media_type {
enum igc_nvm_type {
igc_nvm_unknown = 0,
+ igc_nvm_eeprom_spi,
igc_nvm_flash_hw,
igc_nvm_invm,
};
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index f20183037fb2..11c75433fe22 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -12,6 +12,8 @@
#define DRV_VERSION "0.0.1-k"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
static int debug = -1;
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -66,7 +68,7 @@ enum latency_range {
latency_invalid = 255
};
-static void igc_reset(struct igc_adapter *adapter)
+void igc_reset(struct igc_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
@@ -150,7 +152,7 @@ static void igc_get_hw_control(struct igc_adapter *adapter)
*
* Free all transmit software resources
*/
-static void igc_free_tx_resources(struct igc_ring *tx_ring)
+void igc_free_tx_resources(struct igc_ring *tx_ring)
{
igc_clean_tx_ring(tx_ring);
@@ -261,7 +263,7 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
*
* Return 0 on success, negative on failure
*/
-static int igc_setup_tx_resources(struct igc_ring *tx_ring)
+int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int size = 0;
@@ -381,7 +383,7 @@ static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
*
* Free all receive software resources
*/
-static void igc_free_rx_resources(struct igc_ring *rx_ring)
+void igc_free_rx_resources(struct igc_ring *rx_ring)
{
igc_clean_rx_ring(rx_ring);
@@ -418,7 +420,7 @@ static void igc_free_all_rx_resources(struct igc_adapter *adapter)
*
* Returns 0 on success, negative on failure
*/
-static int igc_setup_rx_resources(struct igc_ring *rx_ring)
+int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int size, desc_len;
@@ -1703,7 +1705,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
*/
-static void igc_up(struct igc_adapter *adapter)
+void igc_up(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
int i = 0;
@@ -1748,7 +1750,7 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter)
* igc_down - Close the interface
* @adapter: board private structure
*/
-static void igc_down(struct igc_adapter *adapter)
+void igc_down(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
@@ -1810,7 +1812,7 @@ static void igc_down(struct igc_adapter *adapter)
igc_clean_all_rx_rings(adapter);
}
-static void igc_reinit_locked(struct igc_adapter *adapter)
+void igc_reinit_locked(struct igc_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
@@ -1922,7 +1924,7 @@ static void igc_configure(struct igc_adapter *adapter)
/**
* igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
- * @adapter: Pointer to adapter structure
+ * @adapter: address of board private structure
* @index: Index of the RAR entry which need to be synced with MAC table
*/
static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
@@ -2298,7 +2300,7 @@ static void igc_update_phy_info(struct timer_list *t)
* igc_has_link - check shared code for link and determine up/down
* @adapter: pointer to driver private info
*/
-static bool igc_has_link(struct igc_adapter *adapter)
+bool igc_has_link(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
bool link_active = false;
@@ -3501,6 +3503,57 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
return value;
}
+int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_mac_info *mac = &adapter->hw.mac;
+
+ mac->autoneg = 0;
+
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work
+ */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
+ switch (spd + dplx) {
+ case SPEED_10 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_10_HALF;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_10_FULL;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ mac->forced_speed_duplex = ADVERTISE_100_HALF;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ mac->forced_speed_duplex = ADVERTISE_100_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ break;
+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
+ goto err_inval;
+ case SPEED_2500 + DUPLEX_FULL:
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
+ break;
+ case SPEED_2500 + DUPLEX_HALF: /* not supported */
+ default:
+ goto err_inval;
+ }
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
+ return 0;
+
+err_inval:
+ dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
+}
+
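igc_set_spd_dplx() above leans on the numeric values behind the ethtool constants: DUPLEX_HALF/DUPLEX_FULL are 0/1 and every supported speed (10/100/1000/2500) is even, so once the guard rejects an odd spd or a dplx outside 0/1, the sum spd + dplx identifies each speed/duplex pair uniquely in the switch. A compile-time restatement of those assumptions (illustrative, not part of the patch):

#include <linux/build_bug.h>
#include <linux/ethtool.h>

static inline void example_spd_dplx_assumptions(void)
{
	BUILD_BUG_ON(DUPLEX_HALF != 0 || DUPLEX_FULL != 1);
	BUILD_BUG_ON((SPEED_10 & 1) || (SPEED_100 & 1) ||
		     (SPEED_1000 & 1) || (SPEED_2500 & 1));
}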
/**
* igc_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -3568,7 +3621,7 @@ static int igc_probe(struct pci_dev *pdev,
hw = &adapter->hw;
hw->back = adapter;
adapter->port_num = hw->bus.func;
- adapter->msg_enable = GENMASK(debug - 1, 0);
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
err = pci_save_state(pdev);
if (err)
@@ -3584,7 +3637,7 @@ static int igc_probe(struct pci_dev *pdev,
hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igc_netdev_ops;
-
+ igc_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
netdev->mem_start = pci_resource_start(pdev, 0);
@@ -3744,8 +3797,8 @@ static struct pci_driver igc_driver = {
.remove = igc_remove,
};
-static void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
- const u32 max_rss_queues)
+void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues)
{
/* Determine if we need to pair queues. */
/* If rss_queues > half of max_rss_queues, pair the queues in
@@ -3757,7 +3810,7 @@ static void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
}
-static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
+unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
{
unsigned int max_rss_queues;
@@ -3837,6 +3890,32 @@ static int igc_sw_init(struct igc_adapter *adapter)
}
/**
+ * igc_reinit_queues - reinitialize the device queues
+ * @adapter: pointer to adapter structure
+ */
+int igc_reinit_queues(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (netif_running(netdev))
+ igc_close(netdev);
+
+ igc_reset_interrupt_capability(adapter);
+
+ if (igc_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ err = igc_open(netdev);
+
+ return err;
+}
+
+/**
* igc_get_hw_dev - return device
* @hw: pointer to hardware structure
*
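The igc_set_spd_dplx() hunk above leans on the standard ethtool encodings (SPEED_10/100/1000/2500 are the literal link rates, DUPLEX_HALF is 0 and DUPLEX_FULL is 1), so a single switch on spd + dplx is unambiguous once the low bit of the speed and any upper bits of the duplex have been rejected. A minimal userspace sketch of that encoding, with the constants defined locally as stand-ins for the <linux/ethtool.h> values:

#include <stdio.h>

/* Local stand-ins for the <linux/ethtool.h> speed/duplex constants. */
#define SPEED_10	10
#define SPEED_100	100
#define SPEED_1000	1000
#define SPEED_2500	2500
#define DUPLEX_HALF	0
#define DUPLEX_FULL	1

/* Same validity check and switch shape as igc_set_spd_dplx() above:
 * duplex must be 0/1 and no speed value has its low bit set, so
 * spd + dplx maps each supported combination to a unique case label.
 */
static int classify(unsigned int spd, unsigned int dplx)
{
	if ((spd & 1) || (dplx & ~1u))
		return -1;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:	return 0;
	case SPEED_10 + DUPLEX_FULL:	return 1;
	case SPEED_100 + DUPLEX_HALF:	return 2;
	case SPEED_100 + DUPLEX_FULL:	return 3;
	case SPEED_1000 + DUPLEX_FULL:	return 4;
	case SPEED_2500 + DUPLEX_FULL:	return 5;
	default:			return -1; /* e.g. 1000/half, 2500/half */
	}
}

int main(void)
{
	printf("100/full -> %d, 1000/half -> %d\n",
	       classify(SPEED_100, DUPLEX_FULL),
	       classify(SPEED_1000, DUPLEX_HALF));
	return 0;
}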
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 38e43e6fc1c7..4c8f96a9a148 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -152,7 +152,6 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
s32 igc_check_downshift(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
- u16 phy_data, offset, mask;
s32 ret_val;
switch (phy->type) {
@@ -161,15 +160,8 @@ s32 igc_check_downshift(struct igc_hw *hw)
/* speed downshift not supported */
phy->speed_downgraded = false;
ret_val = 0;
- goto out;
}
- ret_val = phy->ops.read_reg(hw, offset, &phy_data);
-
- if (!ret_val)
- phy->speed_downgraded = (phy_data & mask) ? true : false;
-
-out:
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index a1bd3216c906..5afe7a8d3faf 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -80,6 +80,9 @@
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
+/* Redirection Table - RW Array */
+#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
+
/* Receive Register Descriptions */
#define IGC_RCTL 0x00100 /* Rx Control - RW */
#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40))
@@ -188,7 +191,6 @@
#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define IGC_LENERRS 0x04138 /* Length Errors Count */
-#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
/* Management registers */
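The IGC_RETA(_i) macro added above follows the common register-array pattern: a base offset plus a fixed per-entry stride (4 bytes for each 32-bit redirection-table entry). A small sketch of how such a macro resolves; the loop bound is arbitrary and chosen only for illustration:

#include <stdio.h>

/* Copied from the hunk above: each RETA entry is a 32-bit register,
 * so entry i lives at 0x05C00 + 4 * i.
 */
#define IGC_RETA(_i)	(0x05C00 + ((_i) * 4))

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("RETA[%u] @ 0x%05X\n", i, (unsigned int)IGC_RETA(i));
	return 0;
}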
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1e49716f52bc..109f8de5a1c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1048,7 +1048,7 @@ mac_reset_top:
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
- hw->mac.num_rar_entries = 128;
+ hw->mac.num_rar_entries = IXGBE_82599_RAR_ENTRIES;
hw->mac.ops.init_rx_addrs(hw);
/* Store the permanent SAN mac address */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 16066c2d5b3a..5c0c26fd8363 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1090,9 +1090,8 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
u32 val;
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
/* Enable the GMAC link status irq for this port */
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
@@ -1122,9 +1121,8 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
}
if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
@@ -1135,10 +1133,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
u32 val;
- if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ if (port->phylink ||
+ phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_MASK);
val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
writel(val, port->base + MVPP22_GMAC_INT_MASK);
@@ -1244,9 +1242,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port,
else
val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
- if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
+ if (phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII)
val |= MVPP2_GMAC_PCS_LB_EN_MASK;
else
val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
@@ -2472,9 +2469,8 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
link = true;
}
} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
- port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
- port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
val = readl(port->base + MVPP22_GMAC_INT_STAT);
if (val & MVPP22_GMAC_INT_STAT_LINK) {
event = true;
@@ -4566,8 +4562,7 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ if (phy_interface_mode_is_8023z(state->interface)) {
/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
* they negotiate duplex: they are always operating with a fixed
* speed of 1000/2500Mbps in full duplex, so force 1000/2500
@@ -4587,9 +4582,8 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
if (phylink_test(state->advertising, Asym_Pause))
an |= MVPP2_GMAC_FC_ADV_ASM_EN;
- if (state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ if (phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII) {
an |= MVPP2_GMAC_IN_BAND_AUTONEG;
ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
@@ -4627,6 +4621,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
const struct phylink_link_state *state)
{
struct mvpp2_port *port = netdev_priv(dev);
+ bool change_interface = port->phy_interface != state->interface;
/* Check for invalid configuration */
if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
@@ -4636,28 +4631,32 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
+ if (change_interface) {
+ mvpp22_gop_mask_irq(port);
- if (port->priv->hw_version == MVPP22 &&
- port->phy_interface != state->interface) {
- port->phy_interface = state->interface;
+ if (port->priv->hw_version == MVPP22) {
+ port->phy_interface = state->interface;
- /* Reconfigure the serdes lanes */
- phy_power_off(port->comphy);
- mvpp22_mode_reconfigure(port);
+ /* Reconfigure the serdes lanes */
+ phy_power_off(port->comphy);
+ mvpp22_mode_reconfigure(port);
+ }
}
/* mac (re)configuration */
if (state->interface == PHY_INTERFACE_MODE_10GKR)
mvpp2_xlg_config(port, mode, state);
else if (phy_interface_mode_is_rgmii(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII)
mvpp2_gmac_config(port, mode, state);
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
+ if (change_interface)
+ mvpp22_gop_unmask_irq(port);
+
mvpp2_port_enable(port);
}
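The mvpp2 hunks above fold the repeated 1000BASE-X/2500BASE-X comparisons into phy_interface_mode_is_8023z(), whose real definition lives in <linux/phy.h>. A minimal userspace sketch of that consolidation; the enum values and the uses_gmac_link_irq() helper are simplified stand-ins invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the phy_interface_t values involved. */
enum phy_mode {
	MODE_SGMII,
	MODE_1000BASEX,
	MODE_2500BASEX,
	MODE_RGMII,
};

/* 802.3z covers 1000BASE-X and 2500BASE-X, so the three-way comparisons
 * in the original code collapse to one predicate plus SGMII/RGMII checks.
 */
static bool mode_is_8023z(enum phy_mode mode)
{
	return mode == MODE_1000BASEX || mode == MODE_2500BASEX;
}

static bool uses_gmac_link_irq(enum phy_mode mode)
{
	return mode == MODE_RGMII || mode_is_8023z(mode) || mode == MODE_SGMII;
}

int main(void)
{
	printf("2500basex -> %d, sgmii -> %d\n",
	       uses_gmac_link_irq(MODE_2500BASEX),
	       uses_gmac_link_irq(MODE_SGMII));
	return 0;
}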
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 046948ead152..9e71f4d41b82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -25,7 +25,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
*/
- if (!switchdev_port_same_parent_id(priv->netdev, dev) ||
+ if (!netdev_port_same_parent_id(priv->netdev, dev) ||
dst_is_lag_dev) {
*route_dev = uplink_dev;
*out_dev = *route_dev;
@@ -496,25 +496,21 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
void *headers_c,
void *headers_v)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- f->mask);
void *misc_c = MLX5_ADDR_OF(fte_match_param,
spec->match_criteria,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters);
+ struct flow_match_ports enc_ports;
+
+ flow_rule_match_enc_ports(rule, &enc_ports);
/* Full udp dst port must be given */
- if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
- memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
+ memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) {
NL_SET_ERR_MSG_MOD(extack,
"VXLAN decap filter must include enc_dst_port condition");
netdev_warn(priv->netdev,
@@ -523,12 +519,12 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
}
/* udp dst port must be known as a VXLAN port */
- if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
NL_SET_ERR_MSG_MOD(extack,
"Matched UDP port is not registered as a VXLAN port");
netdev_warn(priv->netdev,
"UDP port %d is not registered as a VXLAN port\n",
- be16_to_cpu(key->dst));
+ be16_to_cpu(enc_ports.key->dst));
return -EOPNOTSUPP;
}
@@ -536,26 +532,26 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
+ ntohs(enc_ports.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ ntohs(enc_ports.key->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
+ ntohs(enc_ports.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+ ntohs(enc_ports.key->src));
/* match on VNI */
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->key);
- struct flow_dissector_key_keyid *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid enc_keyid;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
- be32_to_cpu(mask->keyid));
+ be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
- be32_to_cpu(key->keyid));
+ be32_to_cpu(enc_keyid.key->keyid));
}
return 0;
}
@@ -570,6 +566,7 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
NL_SET_ERR_MSG_MOD(f->common.extack,
@@ -587,21 +584,14 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
/* gre key */
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *mask = NULL;
- struct flow_dissector_key_keyid *key = NULL;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid enc_keyid;
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
MLX5_SET(fte_match_set_misc, misc_c,
- gre_key.key, be32_to_cpu(mask->keyid));
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->key);
+ gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v,
- gre_key.key, be32_to_cpu(key->keyid));
+ gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
return 0;
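The tunnel-parsing hunks above replace paired skb_flow_dissector_target() lookups with single flow_rule_match_*() calls that hand back the key and mask together. A minimal userspace sketch of that key/mask pairing; struct fake_rule, rule_match_enc_ports() and the has_enc_ports flag are illustrative stand-ins for flow_rule, flow_rule_match_enc_ports() and flow_rule_match_key() from include/net/flow_offload.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ports_key {
	uint16_t src, dst;
};

/* One structure carries both pointers, where the old API needed two
 * separate skb_flow_dissector_target() calls (one for key, one for mask).
 */
struct match_ports {
	const struct ports_key *key;
	const struct ports_key *mask;
};

struct fake_rule {
	struct ports_key key, mask;
	bool has_enc_ports;	/* stand-in for flow_rule_match_key() */
};

static void rule_match_enc_ports(const struct fake_rule *rule,
				 struct match_ports *out)
{
	out->key = &rule->key;
	out->mask = &rule->mask;
}

int main(void)
{
	struct fake_rule rule = {
		.key  = { .src = 0, .dst = 4789 },	/* VXLAN dst port */
		.mask = { .src = 0, .dst = 0xffff },	/* exact match on dst */
		.has_enc_ports = true,
	};
	struct match_ports enc_ports;

	if (rule.has_enc_ports) {
		rule_match_enc_ports(&rule, &enc_ports);
		printf("dst %u (mask 0x%04x)\n",
		       (unsigned int)enc_ports.key->dst,
		       (unsigned int)enc_ports.mask->dst);
	}
	return 0;
}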
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 5d2e0c2f6624..0b1988b330f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -381,7 +381,8 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -398,20 +399,14 @@ static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
uplink_priv = netdev_priv(uplink_dev);
}
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = ETH_ALEN;
- if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
- ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr);
- } else {
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
+ ppid->id_len = ETH_ALEN;
+ if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
+ ether_addr_copy(ppid->id, uplink_upper->dev_addr);
+ } else {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
- ether_addr_copy(attr->u.ppid.id, rep->hw_id);
- }
- break;
- default:
- return -EOPNOTSUPP;
+ ether_addr_copy(ppid->id, rep->hw_id);
}
return 0;
@@ -1284,10 +1279,6 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
-static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
- .switchdev_port_attr_get = mlx5e_attr_get,
-};
-
static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
.ndo_open = mlx5e_vf_rep_open,
.ndo_stop = mlx5e_vf_rep_close,
@@ -1298,6 +1289,7 @@ static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
.ndo_change_mtu = mlx5e_vf_rep_change_mtu,
+ .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};
static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
@@ -1319,6 +1311,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
+ .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};
bool mlx5e_eswitch_rep(struct net_device *netdev)
@@ -1393,8 +1386,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->watchdog_timeo = 15 * HZ;
- netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
-
netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
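The en_rep.c hunks above drop the switchdev_ops attr_get hook in favour of an ndo_get_port_parent_id callback that publishes a hardware-wide identifier (the representor hw_id or the uplink MAC). A minimal userspace sketch of that parent-ID pattern and of the comparison a helper like netdev_port_same_parent_id() performs; the struct and function names below are simplified stand-ins:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Simplified stand-in for struct netdev_phys_item_id. */
struct phys_item_id {
	unsigned char id[32];
	unsigned char id_len;
};

/* Publish a hardware-wide identifier, here a MAC address. */
static void fill_ppid(struct phys_item_id *ppid, const unsigned char *hw_id)
{
	ppid->id_len = ETH_ALEN;
	memcpy(ppid->id, hw_id, ETH_ALEN);
}

/* Two ports belong to the same switch ASIC iff their parent IDs match. */
static bool same_parent(const struct phys_item_id *a,
			const struct phys_item_id *b)
{
	return a->id_len == b->id_len && !memcmp(a->id, b->id, a->id_len);
}

int main(void)
{
	const unsigned char mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct phys_item_id p1, p2;

	fill_ppid(&p1, mac);
	fill_ppid(&p2, mac);
	printf("same parent: %d\n", same_parent(&p1, &p2));
	return 0;
}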
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 74159d39dd66..85c5dd7fc2c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -38,7 +38,6 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
-#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -1309,12 +1308,9 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
-
- struct flow_dissector_key_control *enc_control =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- f->key);
- int err = 0;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_match_control enc_control;
+ int err;
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
headers_c, headers_v);
@@ -1324,79 +1320,70 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return err;
}
- if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- f->mask);
+ flow_rule_match_enc_control(rule, &enc_control);
+
+ if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(mask->src));
+ ntohl(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(key->src));
+ ntohl(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(mask->dst));
+ ntohl(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(key->dst));
+ ntohl(match.key->dst));
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
- } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
- f->mask);
+ } else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
- struct flow_dissector_key_ip *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IP,
- f->key);
- struct flow_dissector_key_ip *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IP,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+ flow_rule_match_enc_ip(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
+ match.mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
+ match.key->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
+ match.mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
+ match.key->ttl);
- if (mask->ttl &&
+ if (match.mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB
(priv->mdev,
ft_field_support.outer_ipv4_ttl)) {
@@ -1437,12 +1424,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 addr_type = 0;
u8 ip_proto = 0;
*match_level = MLX5_MATCH_NONE;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -1461,20 +1450,18 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
- f->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
- if ((dissector_uses_key(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
- dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
- dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
- dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- f->key);
- switch (key->addr_type) {
+ if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_enc_control(rule, &match);
+ switch (match.key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
if (parse_tunnel_attr(priv, spec, f, filter_dev))
@@ -1493,35 +1480,27 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
inner_headers);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(mask->n_proto));
+ ntohs(match.mask->n_proto));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(key->n_proto));
+ ntohs(match.key->n_proto));
- if (mask->n_proto)
+ if (match.mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
- if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
- if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id ||
+ match.mask->vlan_priority ||
+ match.mask->vlan_tpid) {
+ if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
@@ -1533,11 +1512,15 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
cvlan_tag, 1);
}
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
+ match.mask->vlan_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
+ match.key->vlan_id);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
+ match.mask->vlan_priority);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
+ match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
}
@@ -1547,17 +1530,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CVLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CVLAN,
- f->mask);
- if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
- if (key->vlan_tpid == htons(ETH_P_8021AD)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_id ||
+ match.mask->vlan_priority ||
+ match.mask->vlan_tpid) {
+ if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
MLX5_SET(fte_match_set_misc, misc_v,
@@ -1570,69 +1550,58 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
- mask->vlan_id);
+ match.mask->vlan_id);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
- key->vlan_id);
+ match.key->vlan_id);
MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
- mask->vlan_priority);
+ match.mask->vlan_priority);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
- key->vlan_priority);
+ match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
- struct flow_dissector_key_eth_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+ flow_rule_match_eth_addrs(rule, &match);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
- mask->dst);
+ match.mask->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
- key->dst);
+ match.key->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
- mask->src);
+ match.mask->src);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
- key->src);
+ match.key->src);
- if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+ if (!is_zero_ether_addr(match.mask->src) ||
+ !is_zero_ether_addr(match.mask->dst))
*match_level = MLX5_MATCH_L2;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- struct flow_dissector_key_control *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->mask);
- addr_type = key->addr_type;
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
/* the HW doesn't support frag first/later */
- if (mask->flags & FLOW_DIS_FIRST_FRAG)
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
return -EOPNOTSUPP;
- if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
- key->flags & FLOW_DIS_IS_FRAGMENT);
+ match.key->flags & FLOW_DIS_IS_FRAGMENT);
/* the HW doesn't need L3 inline to match on frag=no */
- if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
+ if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
*match_level = MLX5_MATCH_L2;
/* *** L2 attributes parsing up to here *** */
else
@@ -1640,102 +1609,85 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- ip_proto = key->ip_proto;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ ip_proto = match.key->ip_proto;
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
- mask->ip_proto);
+ match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
- key->ip_proto);
+ match.key->ip_proto);
- if (mask->ip_proto)
+ if (match.mask->ip_proto)
*match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
+ struct flow_match_ipv4_addrs match;
+ flow_rule_match_ipv4_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &mask->src, sizeof(mask->src));
+ &match.mask->src, sizeof(match.mask->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &key->src, sizeof(key->src));
+ &match.key->src, sizeof(match.key->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &mask->dst, sizeof(mask->dst));
+ &match.mask->dst, sizeof(match.mask->dst));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &key->dst, sizeof(key->dst));
+ &match.key->dst, sizeof(match.key->dst));
- if (mask->src || mask->dst)
+ if (match.mask->src || match.mask->dst)
*match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ struct flow_match_ipv6_addrs match;
+ flow_rule_match_ipv6_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &mask->src, sizeof(mask->src));
+ &match.mask->src, sizeof(match.mask->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &key->src, sizeof(key->src));
+ &match.key->src, sizeof(match.key->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &mask->dst, sizeof(mask->dst));
+ &match.mask->dst, sizeof(match.mask->dst));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &key->dst, sizeof(key->dst));
+ &match.key->dst, sizeof(match.key->dst));
- if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
- ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
+ if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
+ ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
*match_level = MLX5_MATCH_L3;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->key);
- struct flow_dissector_key_ip *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+ flow_rule_match_ip(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
+ match.mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
+ match.key->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
+ match.mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
+ match.key->ttl);
- if (mask->ttl &&
+ if (match.mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
ft_field_support.outer_ipv4_ttl)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1743,44 +1695,39 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (mask->tos || mask->ttl)
+ if (match.mask->tos || match.mask->ttl)
*match_level = MLX5_MATCH_L3;
}
/* *** L3 attributes parsing up to here *** */
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
switch (ip_proto) {
case IPPROTO_TCP:
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- tcp_sport, ntohs(mask->src));
+ tcp_sport, ntohs(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- tcp_sport, ntohs(key->src));
+ tcp_sport, ntohs(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- tcp_dport, ntohs(mask->dst));
+ tcp_dport, ntohs(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- tcp_dport, ntohs(key->dst));
+ tcp_dport, ntohs(match.key->dst));
break;
case IPPROTO_UDP:
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- udp_sport, ntohs(mask->src));
+ udp_sport, ntohs(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- udp_sport, ntohs(key->src));
+ udp_sport, ntohs(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- udp_dport, ntohs(mask->dst));
+ udp_dport, ntohs(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- udp_dport, ntohs(key->dst));
+ udp_dport, ntohs(match.key->dst));
break;
default:
NL_SET_ERR_MSG_MOD(extack,
@@ -1790,26 +1737,20 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
return -EINVAL;
}
- if (mask->src || mask->dst)
+ if (match.mask->src || match.mask->dst)
*match_level = MLX5_MATCH_L4;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
- struct flow_dissector_key_tcp *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- f->key);
- struct flow_dissector_key_tcp *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+ flow_rule_match_tcp(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
- ntohs(mask->flags));
+ ntohs(match.mask->flags));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
- ntohs(key->flags));
+ ntohs(match.key->flags));
- if (mask->flags)
+ if (match.mask->flags)
*match_level = MLX5_MATCH_L4;
}
@@ -1862,27 +1803,32 @@ struct pedit_headers {
struct udphdr udp;
};
+struct pedit_headers_action {
+ struct pedit_headers vals;
+ struct pedit_headers masks;
+ u32 pedits;
+};
+
static int pedit_header_offsets[] = {
- [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
- [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
- [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
- [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
- [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+ [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+ [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+ [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers *masks,
- struct pedit_headers *vals)
+ struct pedit_headers_action *hdrs)
{
u32 *curr_pmask, *curr_pval;
- if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
+ if (hdr_type >= 2)
goto out_err;
- curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
- curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
+ curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
+ curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
if (*curr_pmask & mask) /* disallow acting twice on the same location */
goto out_err;
@@ -1938,8 +1884,7 @@ static struct mlx5_fields fields[] = {
* max from the SW pedit action. On success, it says how many HW actions were
* actually parsed.
*/
-static int offload_pedit_fields(struct pedit_headers *masks,
- struct pedit_headers *vals,
+static int offload_pedit_fields(struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
@@ -1954,10 +1899,10 @@ static int offload_pedit_fields(struct pedit_headers *masks,
__be16 mask_be16;
void *action;
- set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
- add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
- set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
- add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
+ set_masks = &hdrs[0].masks;
+ add_masks = &hdrs[1].masks;
+ set_vals = &hdrs[0].vals;
+ add_vals = &hdrs[1].vals;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
action = parse_attr->mod_hdr_actions;
@@ -2053,12 +1998,14 @@ static int offload_pedit_fields(struct pedit_headers *masks,
}
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
- const struct tc_action *a, int namespace,
+ struct pedit_headers_action *hdrs,
+ int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
int nkeys, action_size, max_actions;
- nkeys = tcf_pedit_nkeys(a);
+ nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
+ hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
@@ -2080,55 +2027,58 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
- const struct tc_action *a, int namespace,
+ const struct flow_action_entry *act, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
- struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
- int nkeys, i, err = -EOPNOTSUPP;
+ u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
+ int err = -EOPNOTSUPP;
u32 mask, val, offset;
- u8 cmd, htype;
+ u8 htype;
- nkeys = tcf_pedit_nkeys(a);
+ htype = act->mangle.htype;
+ err = -EOPNOTSUPP; /* can't be all optimistic */
- memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
- memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
+ if (htype == FLOW_ACT_MANGLE_UNSPEC) {
+ NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
+ goto out_err;
+ }
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
- cmd = tcf_pedit_cmd(a, i);
- err = -EOPNOTSUPP; /* can't be all optimistic */
+ mask = act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
- if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
- NL_SET_ERR_MSG_MOD(extack,
- "legacy pedit isn't offloaded");
- goto out_err;
- }
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
+ if (err)
+ goto out_err;
- if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
- NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
- goto out_err;
- }
+ hdrs[cmd].pedits++;
- mask = tcf_pedit_mask(a, i);
- val = tcf_pedit_val(a, i);
- offset = tcf_pedit_offset(a, i);
+ return 0;
+out_err:
+ return err;
+}
- err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
- if (err)
- goto out_err;
- }
+static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
+{
+ struct pedit_headers *cmd_masks;
+ int err;
+ u8 cmd;
- err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+ err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
if (err)
goto out_err;
- err = offload_pedit_fields(masks, vals, parse_attr, extack);
+ err = offload_pedit_fields(hdrs, parse_attr, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
- cmd_masks = &masks[cmd];
+ cmd_masks = &hdrs[cmd].masks;
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
NL_SET_ERR_MSG_MOD(extack,
"attempt to offload an unsupported field");
@@ -2178,15 +2128,15 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
- struct tcf_exts *exts,
+ struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
- const struct tc_action *a;
+ const struct flow_action_entry *act;
bool modify_ip_header;
u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
- int nkeys, i;
+ int i;
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
@@ -2196,20 +2146,16 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
goto out_ok;
modify_ip_header = false;
- tcf_exts_for_each_action(i, a, exts) {
- int k;
-
- if (!is_tcf_pedit(a))
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id != FLOW_ACTION_MANGLE &&
+ act->id != FLOW_ACTION_ADD)
continue;
- nkeys = tcf_pedit_nkeys(a);
- for (k = 0; k < nkeys; k++) {
- htype = tcf_pedit_htype(a, k);
- if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
- htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
- modify_ip_header = true;
- break;
- }
+ htype = act->mangle.htype;
+ if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
+ htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ modify_ip_header = true;
+ break;
}
}
@@ -2227,7 +2173,7 @@ out_ok:
}
static bool actions_match_supported(struct mlx5e_priv *priv,
- struct tcf_exts *exts,
+ struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
@@ -2244,7 +2190,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(&parse_attr->spec, exts,
+ return modify_header_match_supported(&parse_attr->spec,
+ flow_action,
extack);
return true;
@@ -2264,52 +2211,50 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
-static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+static int parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
- const struct tc_action *a;
+ struct pedit_headers_action hdrs[2] = {};
+ const struct flow_action_entry *act;
u32 action = 0;
int err, i;
- if (!tcf_exts_has_actions(exts))
+ if (!flow_action_has_entries(flow_action))
return -EINVAL;
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- tcf_exts_for_each_action(i, a, exts) {
- if (is_tcf_gact_shot(a)) {
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.flow_counter))
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- continue;
- }
-
- if (is_tcf_pedit(a)) {
- err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, extack);
+ break;
+ case FLOW_ACTION_MANGLE:
+ case FLOW_ACTION_ADD:
+ err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr, hdrs, extack);
if (err)
return err;
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- continue;
- }
-
- if (is_tcf_csum(a)) {
+ break;
+ case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a),
+ act->csum_flags,
extack))
- continue;
+ break;
return -EOPNOTSUPP;
- }
-
- if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *peer_dev = tcf_mirred_dev(a);
+ case FLOW_ACTION_REDIRECT: {
+ struct net_device *peer_dev = act->dev;
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
@@ -2324,11 +2269,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
peer_dev->name);
return -EINVAL;
}
- continue;
- }
-
- if (is_tcf_skbedit_mark(a)) {
- u32 mark = tcf_skbedit_mark(a);
+ }
+ break;
+ case FLOW_ACTION_MARK: {
+ u32 mark = act->mark;
if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
NL_SET_ERR_MSG_MOD(extack,
@@ -2338,14 +2282,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
attr->flow_tag = mark;
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- continue;
+ }
+ break;
+ default:
+ return -EINVAL;
}
+ }
- return -EINVAL;
+ if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
+ hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr, hdrs, extack);
+ if (err)
+ return err;
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -2450,7 +2403,7 @@ out_err:
}
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
- const struct tc_action *a,
+ const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
u32 *action)
{
@@ -2459,7 +2412,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
return -EOPNOTSUPP;
- if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
MLX5_FS_VLAN_DEPTH))
@@ -2469,10 +2423,11 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
} else {
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
}
- } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
- attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
- attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ attr->vlan_vid[vlan_idx] = act->vlan.vid;
+ attr->vlan_prio[vlan_idx] = act->vlan.prio;
+ attr->vlan_proto[vlan_idx] = act->vlan.proto;
if (!attr->vlan_proto[vlan_idx])
attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
@@ -2484,13 +2439,15 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
- (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
- tcf_vlan_push_prio(a)))
+ (act->vlan.proto != htons(ETH_P_8021Q) ||
+ act->vlan.prio))
return -EOPNOTSUPP;
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
- } else { /* action is TCA_VLAN_ACT_MODIFY */
+ break;
+ default:
+ /* action is FLOW_ACTION_VLAN_MANGLE */
return -EOPNOTSUPP;
}
@@ -2499,58 +2456,56 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
+ struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct ip_tunnel_info *info = NULL;
- const struct tc_action *a;
+ const struct ip_tunnel_info *info = NULL;
+ const struct flow_action_entry *act;
bool encap = false;
u32 action = 0;
int err, i;
- if (!tcf_exts_has_actions(exts))
+ if (!flow_action_has_entries(flow_action))
return -EINVAL;
attr->in_rep = rpriv->rep;
attr->in_mdev = priv->mdev;
- tcf_exts_for_each_action(i, a, exts) {
- if (is_tcf_gact_shot(a)) {
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- continue;
- }
-
- if (is_tcf_pedit(a)) {
- err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, extack);
+ break;
+ case FLOW_ACTION_MANGLE:
+ case FLOW_ACTION_ADD:
+ err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
+ parse_attr, hdrs, extack);
if (err)
return err;
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
attr->split_count = attr->out_count;
- continue;
- }
-
- if (is_tcf_csum(a)) {
+ break;
+ case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a),
- extack))
- continue;
+ act->csum_flags, extack))
+ break;
return -EOPNOTSUPP;
- }
-
- if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_MIRRED: {
struct mlx5e_priv *out_priv;
struct net_device *out_dev;
- out_dev = tcf_mirred_dev(a);
+ out_dev = act->dev;
if (!out_dev) {
/* out_dev is NULL when filters with
* non-existing mirred device are replayed to
@@ -2569,8 +2524,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (switchdev_port_same_parent_id(priv->netdev,
- out_dev) ||
+ if (netdev_port_same_parent_id(priv->netdev,
+ out_dev) ||
is_merged_eswitch_dev(priv, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
@@ -2615,35 +2570,29 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
priv->netdev->name, out_dev->name);
return -EINVAL;
}
- continue;
- }
-
- if (is_tcf_tunnel_set(a)) {
- info = tcf_tunnel_info(a);
+ }
+ break;
+ case FLOW_ACTION_TUNNEL_ENCAP:
+ info = act->tunnel;
if (info)
encap = true;
else
return -EOPNOTSUPP;
- continue;
- }
-
- if (is_tcf_vlan(a)) {
- err = parse_tc_vlan_action(priv, a, attr, &action);
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ err = parse_tc_vlan_action(priv, act, attr, &action);
if (err)
return err;
attr->split_count = attr->out_count;
- continue;
- }
-
- if (is_tcf_tunnel_release(a)) {
+ break;
+ case FLOW_ACTION_TUNNEL_DECAP:
action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
- continue;
- }
-
- if (is_tcf_gact_goto_chain(a)) {
- u32 dest_chain = tcf_gact_goto_chain_index(a);
+ break;
+ case FLOW_ACTION_GOTO: {
+ u32 dest_chain = act->chain_index;
u32 max_chain = mlx5_eswitch_get_chain_range(esw);
if (dest_chain <= attr->chain) {
@@ -2656,15 +2605,23 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = dest_chain;
-
- continue;
+ break;
+ }
+ default:
+ return -EINVAL;
}
+ }
- return -EINVAL;
+ if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
+ hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ parse_attr, hdrs, extack);
+ if (err)
+ return err;
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
if (attr->dest_chain) {
@@ -2775,6 +2732,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch_rep *in_rep,
struct mlx5_core_dev *in_mdev)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -2796,7 +2754,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
flow->esw_attr->chain = f->common.chain_index;
flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
- err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
+ err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
if (err)
goto err_free;
@@ -2912,6 +2870,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
@@ -2934,7 +2893,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
+ err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
if (err)
goto err_free;
@@ -3092,7 +3051,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
- tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse);
return 0;
}
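The pedit rework above stops iterating tcf_pedit keys per action and instead accumulates mangle/add entries into a two-slot pedit_headers_action array, deferring the modify-header allocation until every action has been parsed. A minimal userspace sketch of that accumulate-then-allocate bookkeeping; the struct and helper names are illustrative only:

#include <stdio.h>

enum { CMD_SET = 0, CMD_ADD = 1 };

/* Counts how many 32-bit header edits were queued per command so the
 * modify-header buffer can be sized once; the real struct also carries
 * per-header value/mask shadows.
 */
struct pedit_action {
	unsigned int pedits;
};

static void queue_pedit(struct pedit_action *hdrs, int cmd)
{
	hdrs[cmd].pedits++;
}

int main(void)
{
	struct pedit_action hdrs[2] = { {0}, {0} };

	/* e.g. a filter that rewrites dmac (2 x 32-bit sets) and adjusts ttl */
	queue_pedit(hdrs, CMD_SET);
	queue_pedit(hdrs, CMD_SET);
	queue_pedit(hdrs, CMD_ADD);

	if (hdrs[CMD_SET].pedits || hdrs[CMD_ADD].pedits)
		printf("allocate room for %u modify-header actions\n",
		       hdrs[CMD_SET].pedits + hdrs[CMD_ADD].pedits);
	return 0;
}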
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b2df7c3749fb..b354f3ee94c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -619,7 +619,8 @@ static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steer
if (ret) {
kmem_cache_free(steering->fgs_cache, fg);
return ERR_PTR(ret);
-}
+ }
+
ida_init(&fg->fte_allocator);
fg->mask.match_criteria_enable = match_criteria_enable;
memcpy(&fg->mask.match_criteria, match_criteria,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index ddedf8ab5b64..4f6fa515394e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1460,13 +1460,17 @@ static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
if (trans->retries)
dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
- if (err)
+ if (err) {
dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
trans->tid, trans->reg->id,
mlxsw_reg_id_str(trans->reg->id),
mlxsw_core_reg_access_type_str(trans->type),
trans->emad_status,
mlxsw_emad_op_tlv_status_str(trans->emad_status));
+ trace_devlink_hwerr(priv_to_devlink(mlxsw_core),
+ trans->emad_status,
+ mlxsw_emad_op_tlv_status_str(trans->emad_status));
+ }
list_del(&trans->bulk_list);
kfree_rcu(trans, rcu);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a88169738b4a..8dd808b7f931 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1700,6 +1700,18 @@ static int mlxsw_sp_set_features(struct net_device *dev,
mlxsw_sp_feature_hw_tc);
}
+static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ ppid->id_len = sizeof(mlxsw_sp->base_mac);
+ memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
@@ -1715,6 +1727,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
.ndo_set_features = mlxsw_sp_set_features,
+ .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index a69e3462b65e..2649d128a57c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -588,7 +588,7 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
{
u8 ethertype;
- if (action == TCA_VLAN_ACT_MODIFY) {
+ if (action == FLOW_ACTION_VLAN_MANGLE) {
switch (proto) {
case ETH_P_8021Q:
ethertype = 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ff072358d950..9af9f5c1b25c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -17,13 +17,13 @@
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_acl_rule_info *rulei,
- struct tcf_exts *exts,
+ struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
- const struct tc_action *a;
+ const struct flow_action_entry *act;
int err, i;
- if (!tcf_exts_has_actions(exts))
+ if (!flow_action_has_entries(flow_action))
return 0;
/* Count action is inserted first */
@@ -31,27 +31,31 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- tcf_exts_for_each_action(i, a, exts) {
- if (is_tcf_gact_ok(a)) {
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
err = mlxsw_sp_acl_rulei_act_terminate(rulei);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
return err;
}
- } else if (is_tcf_gact_shot(a)) {
+ break;
+ case FLOW_ACTION_DROP:
err = mlxsw_sp_acl_rulei_act_drop(rulei);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
}
- } else if (is_tcf_gact_trap(a)) {
+ break;
+ case FLOW_ACTION_TRAP:
err = mlxsw_sp_acl_rulei_act_trap(rulei);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
return err;
}
- } else if (is_tcf_gact_goto_chain(a)) {
- u32 chain_index = tcf_gact_goto_chain_index(a);
+ break;
+ case FLOW_ACTION_GOTO: {
+ u32 chain_index = act->chain_index;
struct mlxsw_sp_acl_ruleset *ruleset;
u16 group_id;
@@ -67,7 +71,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
return err;
}
- } else if (is_tcf_mirred_egress_redirect(a)) {
+ }
+ break;
+ case FLOW_ACTION_REDIRECT: {
struct net_device *out_dev;
struct mlxsw_sp_fid *fid;
u16 fid_index;
@@ -79,29 +85,34 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- out_dev = tcf_mirred_dev(a);
+ out_dev = act->dev;
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
out_dev, extack);
if (err)
return err;
- } else if (is_tcf_mirred_egress_mirror(a)) {
- struct net_device *out_dev = tcf_mirred_dev(a);
+ }
+ break;
+ case FLOW_ACTION_MIRRED: {
+ struct net_device *out_dev = act->dev;
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
block, out_dev,
extack);
if (err)
return err;
- } else if (is_tcf_vlan(a)) {
- u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
- u32 action = tcf_vlan_action(a);
- u8 prio = tcf_vlan_push_prio(a);
- u16 vid = tcf_vlan_push_vid(a);
+ }
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP: {
+ u16 proto = be16_to_cpu(act->vlan.proto);
+ u8 prio = act->vlan.prio;
+ u16 vid = act->vlan.vid;
return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
- action, vid,
+ act->id, vid,
proto, prio, extack);
- } else {
+ }
+ default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
return -EOPNOTSUPP;
@@ -113,59 +124,49 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
- struct flow_dissector_key_ipv4_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- struct flow_dissector_key_ipv4_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(f->rule, &match);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
- (char *) &key->src,
- (char *) &mask->src, 4);
+ (char *) &match.key->src,
+ (char *) &match.mask->src, 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
- (char *) &key->dst,
- (char *) &mask->dst, 4);
+ (char *) &match.key->dst,
+ (char *) &match.mask->dst, 4);
}
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
- struct flow_dissector_key_ipv6_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- struct flow_dissector_key_ipv6_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(f->rule, &match);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
- &key->src.s6_addr[0x0],
- &mask->src.s6_addr[0x0], 4);
+ &match.key->src.s6_addr[0x0],
+ &match.mask->src.s6_addr[0x0], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
- &key->src.s6_addr[0x4],
- &mask->src.s6_addr[0x4], 4);
+ &match.key->src.s6_addr[0x4],
+ &match.mask->src.s6_addr[0x4], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
- &key->src.s6_addr[0x8],
- &mask->src.s6_addr[0x8], 4);
+ &match.key->src.s6_addr[0x8],
+ &match.mask->src.s6_addr[0x8], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
- &key->src.s6_addr[0xC],
- &mask->src.s6_addr[0xC], 4);
+ &match.key->src.s6_addr[0xC],
+ &match.mask->src.s6_addr[0xC], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
- &key->dst.s6_addr[0x0],
- &mask->dst.s6_addr[0x0], 4);
+ &match.key->dst.s6_addr[0x0],
+ &match.mask->dst.s6_addr[0x0], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
- &key->dst.s6_addr[0x4],
- &mask->dst.s6_addr[0x4], 4);
+ &match.key->dst.s6_addr[0x4],
+ &match.mask->dst.s6_addr[0x4], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
- &key->dst.s6_addr[0x8],
- &mask->dst.s6_addr[0x8], 4);
+ &match.key->dst.s6_addr[0x8],
+ &match.mask->dst.s6_addr[0x8], 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
- &key->dst.s6_addr[0xC],
- &mask->dst.s6_addr[0xC], 4);
+ &match.key->dst.s6_addr[0xC],
+ &match.mask->dst.s6_addr[0xC], 4);
}
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
@@ -173,9 +174,10 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
struct tc_cls_flower_offload *f,
u8 ip_proto)
{
- struct flow_dissector_key_ports *key, *mask;
+ const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_match_ports match;
- if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
return 0;
if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
@@ -184,16 +186,13 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
}
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
+ flow_rule_match_ports(rule, &match);
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
- ntohs(key->dst), ntohs(mask->dst));
+ ntohs(match.key->dst),
+ ntohs(match.mask->dst));
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
- ntohs(key->src), ntohs(mask->src));
+ ntohs(match.key->src),
+ ntohs(match.mask->src));
return 0;
}
@@ -202,9 +201,10 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
struct tc_cls_flower_offload *f,
u8 ip_proto)
{
- struct flow_dissector_key_tcp *key, *mask;
+ const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_match_tcp match;
- if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
return 0;
if (ip_proto != IPPROTO_TCP) {
@@ -213,14 +213,11 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
}
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- f->mask);
+ flow_rule_match_tcp(rule, &match);
+
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
- ntohs(key->flags), ntohs(mask->flags));
+ ntohs(match.key->flags),
+ ntohs(match.mask->flags));
return 0;
}
@@ -229,9 +226,10 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
struct tc_cls_flower_offload *f,
u16 n_proto)
{
- struct flow_dissector_key_ip *key, *mask;
+ const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_match_ip match;
- if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
return 0;
if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
@@ -240,20 +238,18 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
}
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IP,
- f->mask);
+ flow_rule_match_ip(rule, &match);
+
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
- key->ttl, mask->ttl);
+ match.key->ttl, match.mask->ttl);
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
- key->tos & 0x3, mask->tos & 0x3);
+ match.key->tos & 0x3,
+ match.mask->tos & 0x3);
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
- key->tos >> 6, mask->tos >> 6);
+ match.key->tos >> 6,
+ match.mask->tos >> 6);
return 0;
}
@@ -263,13 +259,15 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct tc_cls_flower_offload *f)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
u16 n_proto_mask = 0;
u16 n_proto_key = 0;
u16 addr_type = 0;
u8 ip_proto = 0;
int err;
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
@@ -286,25 +284,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- f->key);
- addr_type = key->addr_type;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- struct flow_dissector_key_basic *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->mask);
- n_proto_key = ntohs(key->n_proto);
- n_proto_mask = ntohs(mask->n_proto);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
if (n_proto_key == ETH_P_ALL) {
n_proto_key = 0;
@@ -314,60 +306,53 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
MLXSW_AFK_ELEMENT_ETHERTYPE,
n_proto_key, n_proto_mask);
- ip_proto = key->ip_proto;
+ ip_proto = match.key->ip_proto;
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_IP_PROTO,
- key->ip_proto, mask->ip_proto);
+ match.key->ip_proto,
+ match.mask->ip_proto);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->key);
- struct flow_dissector_key_eth_addrs *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+ flow_rule_match_eth_addrs(rule, &match);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
MLXSW_AFK_ELEMENT_DMAC_32_47,
- key->dst, mask->dst, 2);
+ match.key->dst,
+ match.mask->dst, 2);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
MLXSW_AFK_ELEMENT_DMAC_0_31,
- key->dst + 2, mask->dst + 2, 4);
+ match.key->dst + 2,
+ match.mask->dst + 2, 4);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
MLXSW_AFK_ELEMENT_SMAC_32_47,
- key->src, mask->src, 2);
+ match.key->src,
+ match.mask->src, 2);
mlxsw_sp_acl_rulei_keymask_buf(rulei,
MLXSW_AFK_ELEMENT_SMAC_0_31,
- key->src + 2, mask->src + 2, 4);
+ match.key->src + 2,
+ match.mask->src + 2, 4);
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->key);
- struct flow_dissector_key_vlan *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+ flow_rule_match_vlan(rule, &match);
if (mlxsw_sp_acl_block_is_egress_bound(block)) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
- if (mask->vlan_id != 0)
+ if (match.mask->vlan_id != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VID,
- key->vlan_id,
- mask->vlan_id);
- if (mask->vlan_priority != 0)
+ match.key->vlan_id,
+ match.mask->vlan_id);
+ if (match.mask->vlan_priority != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_PCP,
- key->vlan_priority,
- mask->vlan_priority);
+ match.key->vlan_priority,
+ match.mask->vlan_priority);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -387,7 +372,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
+ return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
+ &f->rule->action,
f->common.extack);
}
@@ -486,7 +472,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rule_get_stats;
- tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return 0;
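
The spectrum_flower.c hunks above all follow the same match-side pattern; a hedged sketch of it (example_parse_match is a placeholder, the flow_rule_match_*() helpers are those introduced by the series):

	/* Sketch: each flow_rule_match_*() helper fills a { key, mask } pair,
	 * replacing the paired skb_flow_dissector_target() calls on
	 * f->key and f->mask.
	 */
	static void example_parse_match(struct tc_cls_flower_offload *f)
	{
		struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
			struct flow_match_basic match;

			flow_rule_match_basic(rule, &match);
			/* use match.key->n_proto / match.mask->n_proto */
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			struct flow_match_ipv4_addrs match;

			flow_rule_match_ipv4_addrs(rule, &match);
			/* use match.key->src/dst and match.mask->src/dst */
		}
	}
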
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 230e1f6e192b..6754061d9b72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -364,6 +364,7 @@ enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
+ MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
/* This is a special case of local delivery, where a packet should be
* decapsulated on reception. Note that there is no corresponding ENCAP,
@@ -3928,6 +3929,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
return !!nh_group->adj_index_valid;
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
return !!nh_group->nh_rif;
+ case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
return true;
@@ -3963,6 +3965,7 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
int i;
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
+ fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
@@ -4004,7 +4007,8 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
- if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
+ if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
+ fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
return;
@@ -4172,6 +4176,19 @@ static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
+static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -4211,6 +4228,8 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
fib_entry, op);
@@ -4279,8 +4298,10 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
case RTN_BROADCAST:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
return 0;
+ case RTN_BLACKHOLE:
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
+ return 0;
case RTN_UNREACHABLE: /* fall through */
- case RTN_BLACKHOLE: /* fall through */
case RTN_PROHIBIT:
/* Packets hitting these routes need to be trapped, but
* can do so with a lower priority than packets directed
@@ -5229,6 +5250,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
*/
if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ else if (rt->fib6_type == RTN_BLACKHOLE)
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
else if (rt->fib6_flags & RTF_REJECT)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a4a9fe992193..95e37de3e48f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -451,11 +451,6 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
- memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
- attr->u.ppid.id_len);
- break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
&attr->u.brport_flags);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 2d4f213e154d..533fe6235b7c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -11,7 +11,6 @@
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
-#include <net/switchdev.h>
#include "pci.h"
#include "core.h"
@@ -390,6 +389,18 @@ static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
name, len);
}
+static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ ppid->id_len = sizeof(mlxsw_sx->hw_id);
+ memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
.ndo_open = mlxsw_sx_port_open,
.ndo_stop = mlxsw_sx_port_stop,
@@ -397,6 +408,7 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
.ndo_change_mtu = mlxsw_sx_port_change_mtu,
.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
.ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
+ .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id,
};
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
@@ -901,28 +913,6 @@ static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
.set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
};
-static int mlxsw_sx_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
- struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
- memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
- .switchdev_port_attr_get = mlxsw_sx_port_attr_get,
-};
-
static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
{
char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
@@ -1034,7 +1024,6 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
- dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
if (err) {
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index c6a575eb0ff5..195306d05bcd 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -916,6 +916,18 @@ static int ocelot_set_features(struct net_device *dev,
return 0;
}
+static int ocelot_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ ppid->id_len = sizeof(ocelot->base_mac);
+ memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -930,6 +942,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
+ .ndo_get_port_parent_id = ocelot_get_port_parent_id,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -1013,25 +1026,6 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int ocelot_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- struct ocelot_port *ocelot_port = netdev_priv(dev);
- struct ocelot *ocelot = ocelot_port->ocelot;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(ocelot->base_mac);
- memcpy(&attr->u.ppid.id, &ocelot->base_mac,
- attr->u.ppid.id_len);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
struct switchdev_trans *trans,
u8 state)
@@ -1331,7 +1325,6 @@ static int ocelot_port_obj_del(struct net_device *dev,
}
static const struct switchdev_ops ocelot_port_switchdev_ops = {
- .switchdev_port_attr_get = ocelot_port_attr_get,
.switchdev_port_attr_set = ocelot_port_attr_set,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 8d54b36afee8..583e97c99e68 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -3,7 +3,6 @@
#include <linux/bitfield.h>
#include <net/pkt_cls.h>
-#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
@@ -37,7 +36,7 @@ static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
- const struct tc_action *action)
+ const struct flow_action_entry *act)
{
size_t act_size = sizeof(struct nfp_fl_push_vlan);
u16 tmp_push_vlan_tci;
@@ -45,17 +44,17 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
push_vlan->reserved = 0;
- push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
+ push_vlan->vlan_tpid = act->vlan.proto;
tmp_push_vlan_tci =
- FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
NFP_FL_PUSH_VLAN_CFI;
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
static int
-nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
+nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_flow, int act_len)
{
size_t act_size = sizeof(struct nfp_fl_pre_lag);
@@ -63,7 +62,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
struct net_device *out_dev;
int err;
- out_dev = tcf_mirred_dev(action);
+ out_dev = act->dev;
if (!out_dev || !netif_is_lag_master(out_dev))
return 0;
@@ -92,7 +91,8 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
- const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
+ const struct flow_action_entry *act,
+ struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
@@ -104,7 +104,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
- out_dev = tcf_mirred_dev(action);
+ out_dev = act->dev;
if (!out_dev)
return -EOPNOTSUPP;
@@ -137,7 +137,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
if (nfp_netdev_is_nfp_repr(in_dev)) {
/* Confirm ingress and egress are on same device. */
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ if (!netdev_port_same_parent_id(in_dev, out_dev))
return -EOPNOTSUPP;
}
@@ -155,9 +155,9 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
- const struct tc_action *action)
+ const struct flow_action_entry *act)
{
- struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+ const struct ip_tunnel_info *tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
switch (tun->key.tp_dst) {
@@ -195,9 +195,9 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
- const struct tc_action *action)
+ const struct flow_action_entry *act)
{
- struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
int opt_len, opt_cnt, act_start, tot_push_len;
u8 *src = ip_tunnel_info_opts(ip_tun);
@@ -259,13 +259,13 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
struct nfp_fl_set_ipv4_udp_tun *set_tun,
- const struct tc_action *action,
+ const struct flow_action_entry *act,
struct nfp_fl_pre_tunnel *pre_tun,
enum nfp_flower_tun_type tun_type,
struct net_device *netdev)
{
size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
- struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ const struct ip_tunnel_info *ip_tun = act->tunnel;
struct nfp_flower_priv *priv = app->priv;
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
@@ -345,7 +345,7 @@ static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
}
static int
-nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_eth(const struct flow_action_entry *act, int idx, u32 off,
struct nfp_fl_set_eth *set_eth)
{
u32 exact, mask;
@@ -353,8 +353,8 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
if (off + 4 > ETH_ALEN * 2)
return -EOPNOTSUPP;
- mask = ~tcf_pedit_mask(action, idx);
- exact = tcf_pedit_val(action, idx);
+ mask = ~act->mangle.mask;
+ exact = act->mangle.val;
if (exact & ~mask)
return -EOPNOTSUPP;
@@ -376,7 +376,7 @@ struct ipv4_ttl_word {
};
static int
-nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_ip4(const struct flow_action_entry *act, int idx, u32 off,
struct nfp_fl_set_ip4_addrs *set_ip_addr,
struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
@@ -387,8 +387,8 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
__be32 exact, mask;
/* We are expecting tcf_pedit to return a big endian value */
- mask = (__force __be32)~tcf_pedit_mask(action, idx);
- exact = (__force __be32)tcf_pedit_val(action, idx);
+ mask = (__force __be32)~act->mangle.mask;
+ exact = (__force __be32)act->mangle.val;
if (exact & ~mask)
return -EOPNOTSUPP;
@@ -505,7 +505,7 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
}
static int
-nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_ip6(const struct flow_action_entry *act, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
struct nfp_fl_set_ipv6_addr *ip_src,
struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
@@ -515,8 +515,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
u8 word;
/* We are expecting tcf_pedit to return a big endian value */
- mask = (__force __be32)~tcf_pedit_mask(action, idx);
- exact = (__force __be32)tcf_pedit_val(action, idx);
+ mask = (__force __be32)~act->mangle.mask;
+ exact = (__force __be32)act->mangle.val;
if (exact & ~mask)
return -EOPNOTSUPP;
@@ -541,7 +541,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
}
static int
-nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_tport(const struct flow_action_entry *act, int idx, u32 off,
struct nfp_fl_set_tport *set_tport, int opcode)
{
u32 exact, mask;
@@ -549,8 +549,8 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
if (off)
return -EOPNOTSUPP;
- mask = ~tcf_pedit_mask(action, idx);
- exact = tcf_pedit_val(action, idx);
+ mask = ~act->mangle.mask;
+ exact = act->mangle.val;
if (exact & ~mask)
return -EOPNOTSUPP;
@@ -584,20 +584,22 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
}
static int
-nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
+nfp_fl_pedit(const struct flow_action_entry *act,
+ struct tc_cls_flower_offload *flow,
char *nfp_action, int *a_len, u32 *csum_updated)
{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
+ enum flow_action_mangle_base htype;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
- enum pedit_header_type htype;
- int idx, nkeys, err;
size_t act_size = 0;
- u32 offset, cmd;
u8 ip_proto = 0;
+ int idx, err;
+ u32 offset;
memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
@@ -606,50 +608,41 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
memset(&set_ip_addr, 0, sizeof(set_ip_addr));
memset(&set_tport, 0, sizeof(set_tport));
memset(&set_eth, 0, sizeof(set_eth));
- nkeys = tcf_pedit_nkeys(action);
- for (idx = 0; idx < nkeys; idx++) {
- cmd = tcf_pedit_cmd(action, idx);
- htype = tcf_pedit_htype(action, idx);
- offset = tcf_pedit_offset(action, idx);
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
- if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
- return -EOPNOTSUPP;
-
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- err = nfp_fl_set_eth(action, idx, offset, &set_eth);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
- &set_ip_ttl_tos);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
- &set_ip6_src, &set_ip6_tc_hl_fl);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- err = nfp_fl_set_tport(action, idx, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_TCP);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- err = nfp_fl_set_tport(action, idx, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_UDP);
- break;
- default:
- return -EOPNOTSUPP;
- }
- if (err)
- return err;
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ err = nfp_fl_set_eth(act, idx, offset, &set_eth);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ err = nfp_fl_set_ip4(act, idx, offset, &set_ip_addr,
+ &set_ip_ttl_tos);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ err = nfp_fl_set_ip6(act, idx, offset, &set_ip6_dst,
+ &set_ip6_src, &set_ip6_tc_hl_fl);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ err = nfp_fl_set_tport(act, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ err = nfp_fl_set_tport(act, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
+ if (err)
+ return err;
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *basic;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
- ip_proto = basic->ip_proto;
+ flow_rule_match_basic(rule, &match);
+ ip_proto = match.key->ip_proto;
}
if (set_eth.head.len_lw) {
@@ -733,7 +726,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
}
static int
-nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
+nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -753,7 +746,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
+ err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
tun_out_cnt);
if (err)
return err;
@@ -764,7 +757,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
/* nfp_fl_pre_lag returns -err or size of prelag action added.
* This will be 0 if it is not egressing to a lag dev.
*/
- prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
+ prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len);
if (prelag_size < 0)
return prelag_size;
else if (prelag_size > 0 && (!last || *out_cnt))
@@ -778,7 +771,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
}
static int
-nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
+nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
@@ -791,23 +784,25 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
struct nfp_fl_pop_vlan *pop_v;
int err;
- if (is_tcf_gact_shot(a)) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
- } else if (is_tcf_mirred_egress_redirect(a)) {
- err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
+ break;
+ case FLOW_ACTION_REDIRECT:
+ err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
out_cnt, csum_updated);
if (err)
return err;
-
- } else if (is_tcf_mirred_egress_mirror(a)) {
- err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
+ break;
+ case FLOW_ACTION_MIRRED:
+ err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
out_cnt, csum_updated);
if (err)
return err;
-
- } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
+ break;
+ case FLOW_ACTION_VLAN_POP:
if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
@@ -816,19 +811,21 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl_pop_vlan(pop_v);
*a_len += sizeof(struct nfp_fl_pop_vlan);
- } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
return -EOPNOTSUPP;
psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
- nfp_fl_push_vlan(psh_v, a);
+ nfp_fl_push_vlan(psh_v, act);
*a_len += sizeof(struct nfp_fl_push_vlan);
- } else if (is_tcf_tunnel_set(a)) {
- struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
+ break;
+ case FLOW_ACTION_TUNNEL_ENCAP: {
+ const struct ip_tunnel_info *ip_tun = act->tunnel;
- *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(app, act);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
@@ -847,32 +844,36 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
*a_len += sizeof(struct nfp_fl_pre_tunnel);
- err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, act);
if (err)
return err;
set_tun = (void *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
+ err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun,
*tun_type, netdev);
if (err)
return err;
*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
- } else if (is_tcf_tunnel_release(a)) {
+ }
+ break;
+ case FLOW_ACTION_TUNNEL_DECAP:
/* Tunnel decap is handled by default so accept action. */
return 0;
- } else if (is_tcf_pedit(a)) {
- if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
+ case FLOW_ACTION_MANGLE:
+ if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
a_len, csum_updated))
return -EOPNOTSUPP;
- } else if (is_tcf_csum(a)) {
+ break;
+ case FLOW_ACTION_CSUM:
/* csum action requests recalc of something we have not fixed */
- if (tcf_csum_update_flags(a) & ~*csum_updated)
+ if (act->csum_flags & ~*csum_updated)
return -EOPNOTSUPP;
/* If we will correctly fix the csum we can remove it from the
* csum update list. Which will later be used to check support.
*/
- *csum_updated &= ~tcf_csum_update_flags(a);
- } else {
+ *csum_updated &= ~act->csum_flags;
+ break;
+ default:
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
}
@@ -887,7 +888,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
enum nfp_flower_tun_type tun_type;
- const struct tc_action *a;
+ struct flow_action_entry *act;
u32 csum_updated = 0;
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -898,8 +899,8 @@ int nfp_flower_compile_action(struct nfp_app *app,
tun_out_cnt = 0;
out_cnt = 0;
- tcf_exts_for_each_action(i, a, flow->exts) {
- err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+ flow_action_for_each(i, act, &flow->rule->action) {
+ err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated);
if (err)
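
For reference, a hedged sketch of how per-action parameters are read from a struct flow_action_entry after the nfp conversion above (example_read_action_params is a placeholder; the field names are the ones the diff uses in place of the old tcf_vlan_*(), tcf_pedit_*() and tcf_csum_update_flags() accessors):

	static void example_read_action_params(const struct flow_action_entry *act)
	{
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
			/* act->vlan.proto (__be16), act->vlan.vid, act->vlan.prio */
			break;
		case FLOW_ACTION_MANGLE:
			/* act->mangle.htype, act->mangle.offset,
			 * act->mangle.mask, act->mangle.val
			 */
			break;
		case FLOW_ACTION_CSUM:
			/* act->csum_flags carries the csum update flag bits */
			break;
		default:
			break;
		}
	}
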
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index c04a0d6b0184..1279fa5da9e1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -8,31 +8,41 @@
#include "main.h"
static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
- struct tc_cls_flower_offload *flow, u8 key_type,
- bool mask_version)
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
+ struct nfp_flower_meta_tci *msk,
+ struct tc_cls_flower_offload *flow, u8 key_type)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_vlan *flow_vlan;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
u16 tmp_tci;
- memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
+ memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
+ memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
+
/* Populate the metadata frame. */
- frame->nfp_flow_key_layer = key_type;
- frame->mask_id = ~0;
+ ext->nfp_flow_key_layer = key_type;
+ ext->mask_id = ~0;
+
+ msk->nfp_flow_key_layer = key_type;
+ msk->mask_id = ~0;
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- flow_vlan = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- target);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
+ if (match.key->vlan_id || match.key->vlan_priority) {
+ tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.key->vlan_id) |
+ NFP_FLOWER_MASK_VLAN_CFI;
+ ext->tci = cpu_to_be16(tmp_tci);
tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- flow_vlan->vlan_priority) |
+ match.mask->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- flow_vlan->vlan_id) |
+ match.mask->vlan_id) |
NFP_FLOWER_MASK_VLAN_CFI;
- frame->tci = cpu_to_be16(tmp_tci);
+ msk->tci = cpu_to_be16(tmp_tci);
}
}
}
@@ -64,231 +74,244 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
}
static void
-nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
+ struct nfp_flower_mac_mpls *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_eth_addrs *addr;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
- memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
+ memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
+ memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- addr = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
- target);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
/* Populate mac frame. */
- ether_addr_copy(frame->mac_dst, &addr->dst[0]);
- ether_addr_copy(frame->mac_src, &addr->src[0]);
+ ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
+ ether_addr_copy(ext->mac_src, &match.key->src[0]);
+ ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
+ ether_addr_copy(msk->mac_src, &match.mask->src[0]);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
- struct flow_dissector_key_mpls *mpls;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match;
u32 t_mpls;
- mpls = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_MPLS,
- target);
-
- t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
- FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
+ flow_rule_match_mpls(rule, &match);
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
NFP_FLOWER_MASK_MPLS_Q;
-
- frame->mpls_lse = cpu_to_be32(t_mpls);
- } else if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC)) {
+ ext->mpls_lse = cpu_to_be32(t_mpls);
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+ msk->mpls_lse = cpu_to_be32(t_mpls);
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
* bit, which indicates an mpls ether type but without any
* mpls fields.
*/
- struct flow_dissector_key_basic *key_basic;
-
- key_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
- if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
- key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
- frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+ match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
+ ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+ }
}
}
static void
-nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
+ struct nfp_flower_tp_ports *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ports *tp;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
- memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
+ memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
+ memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- tp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- target);
- frame->port_src = tp->src;
- frame->port_dst = tp->dst;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ ext->port_src = match.key->src;
+ ext->port_dst = match.key->dst;
+ msk->port_src = match.mask->src;
+ msk->port_dst = match.mask->dst;
}
}
static void
-nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
+ struct nfp_flower_ip_ext *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *basic;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
- frame->proto = basic->ip_proto;
+ flow_rule_match_basic(rule, &match);
+ ext->proto = match.key->ip_proto;
+ msk->proto = match.mask->ip_proto;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *flow_ip;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
- flow_ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IP,
- target);
- frame->tos = flow_ip->tos;
- frame->ttl = flow_ip->ttl;
+ flow_rule_match_ip(rule, &match);
+ ext->tos = match.key->tos;
+ ext->ttl = match.key->ttl;
+ msk->tos = match.mask->tos;
+ msk->ttl = match.mask->ttl;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
- struct flow_dissector_key_tcp *tcp;
- u32 tcp_flags;
-
- tcp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_TCP, target);
- tcp_flags = be16_to_cpu(tcp->flags);
-
- if (tcp_flags & TCPHDR_FIN)
- frame->flags |= NFP_FL_TCP_FLAG_FIN;
- if (tcp_flags & TCPHDR_SYN)
- frame->flags |= NFP_FL_TCP_FLAG_SYN;
- if (tcp_flags & TCPHDR_RST)
- frame->flags |= NFP_FL_TCP_FLAG_RST;
- if (tcp_flags & TCPHDR_PSH)
- frame->flags |= NFP_FL_TCP_FLAG_PSH;
- if (tcp_flags & TCPHDR_URG)
- frame->flags |= NFP_FL_TCP_FLAG_URG;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+ u16 tcp_flags;
+
+ flow_rule_match_tcp(rule, &match);
+ tcp_flags = be16_to_cpu(match.key->flags);
+
+ if (tcp_flags & TCPHDR_FIN) {
+ ext->flags |= NFP_FL_TCP_FLAG_FIN;
+ msk->flags |= NFP_FL_TCP_FLAG_FIN;
+ }
+ if (tcp_flags & TCPHDR_SYN) {
+ ext->flags |= NFP_FL_TCP_FLAG_SYN;
+ msk->flags |= NFP_FL_TCP_FLAG_SYN;
+ }
+ if (tcp_flags & TCPHDR_RST) {
+ ext->flags |= NFP_FL_TCP_FLAG_RST;
+ msk->flags |= NFP_FL_TCP_FLAG_RST;
+ }
+ if (tcp_flags & TCPHDR_PSH) {
+ ext->flags |= NFP_FL_TCP_FLAG_PSH;
+ msk->flags |= NFP_FL_TCP_FLAG_PSH;
+ }
+ if (tcp_flags & TCPHDR_URG) {
+ ext->flags |= NFP_FL_TCP_FLAG_URG;
+ msk->flags |= NFP_FL_TCP_FLAG_URG;
+ }
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
- key = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- target);
- if (key->flags & FLOW_DIS_IS_FRAGMENT)
- frame->flags |= NFP_FL_IP_FRAGMENTED;
- if (key->flags & FLOW_DIS_FIRST_FRAG)
- frame->flags |= NFP_FL_IP_FRAG_FIRST;
+ flow_rule_match_control(rule, &match);
+ if (match.key->flags & FLOW_DIS_IS_FRAGMENT) {
+ ext->flags |= NFP_FL_IP_FRAGMENTED;
+ msk->flags |= NFP_FL_IP_FRAGMENTED;
+ }
+ if (match.key->flags & FLOW_DIS_FIRST_FRAG) {
+ ext->flags |= NFP_FL_IP_FRAG_FIRST;
+ msk->flags |= NFP_FL_IP_FRAG_FIRST;
+ }
}
}
static void
-nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
+ struct nfp_flower_ipv4 *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *addr;
-
- memset(frame, 0, sizeof(struct nfp_flower_ipv4));
-
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- addr = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- target);
- frame->ipv4_src = addr->src;
- frame->ipv4_dst = addr->dst;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_match_ipv4_addrs match;
+
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4));
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ flow_rule_match_ipv4_addrs(rule, &match);
+ ext->ipv4_src = match.key->src;
+ ext->ipv4_dst = match.key->dst;
+ msk->ipv4_src = match.mask->src;
+ msk->ipv4_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
static void
-nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
+ struct nfp_flower_ipv6 *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv6_addrs *addr;
-
- memset(frame, 0, sizeof(struct nfp_flower_ipv6));
-
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- addr = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- target);
- frame->ipv6_src = addr->src;
- frame->ipv6_dst = addr->dst;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+
+ memset(ext, 0, sizeof(struct nfp_flower_ipv6));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv6));
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ ext->ipv6_src = match.key->src;
+ ext->ipv6_dst = match.key->dst;
+ msk->ipv6_src = match.mask->src;
+ msk->ipv6_dst = match.mask->dst;
}
- nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
+ nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}
static int
-nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_geneve_opt(void *ext, void *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_enc_opts *opts;
+ struct flow_match_enc_opts match;
- opts = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_OPTS,
- target);
- memcpy(key_buf, opts->data, opts->len);
+ flow_rule_match_enc_opts(flow->rule, &match);
+ memcpy(ext, match.key->data, match.key->len);
+ memcpy(msk, match.mask->data, match.mask->len);
return 0;
}
static void
-nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
+ struct nfp_flower_ipv4_udp_tun *msk,
+ struct tc_cls_flower_offload *flow)
{
- struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *tun_ips;
- struct flow_dissector_key_keyid *vni;
- struct flow_dissector_key_ip *ip;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
- memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
+ memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
+ memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match;
u32 temp_vni;
- vni = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- target);
- temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
- frame->tun_id = cpu_to_be32(temp_vni);
+ flow_rule_match_enc_keyid(rule, &match);
+ temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ ext->tun_id = cpu_to_be32(temp_vni);
+ temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ msk->tun_id = cpu_to_be32(temp_vni);
}
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
- tun_ips =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- target);
- frame->ip_src = tun_ips->src;
- frame->ip_dst = tun_ips->dst;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ ext->ip_src = match.key->src;
+ ext->ip_dst = match.key->dst;
+ msk->ip_src = match.mask->src;
+ msk->ip_dst = match.mask->dst;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
- ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_IP,
- target);
- frame->tos = ip->tos;
- frame->ttl = ip->ttl;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ ext->tos = match.key->tos;
+ ext->ttl = match.key->ttl;
+ msk->tos = match.mask->tos;
+ msk->ttl = match.mask->ttl;
}
}
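
The hunks above all follow one pattern: instead of calling skb_flow_dissector_target() twice, once against flow->key and once against flow->mask, each compile helper now receives both the exact and the mask buffer and fills them from a single flow_match_* lookup. A minimal sketch of that idiom outside the nfp context; the helper name and the out_key/out_mask parameters are illustrative, only the <net/flow_offload.h> accessors come from the infrastructure this series converts to:

#include <net/flow_offload.h>

/* Illustrative helper: copy IPv4 addresses into caller-provided
 * exact and mask buffers in one pass over the rule.
 */
static void example_compile_ipv4(struct flow_rule *rule,
				 struct flow_dissector_key_ipv4_addrs *out_key,
				 struct flow_dissector_key_ipv4_addrs *out_mask)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		/* key and mask come back together */
		flow_rule_match_ipv4_addrs(rule, &match);
		*out_key = *match.key;
		*out_mask = *match.mask;
	}
}

Because key and mask are fetched together, the exact and mask buffers cannot drift apart, which is what lets the old mask_version parameter disappear.
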
@@ -313,12 +336,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
- /* Populate Exact Metadata. */
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
- flow, key_ls->key_layer, false);
- /* Populate Mask Metadata. */
- nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
- flow, key_ls->key_layer, true);
+ (struct nfp_flower_meta_tci *)msk,
+ flow, key_ls->key_layer);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -348,45 +368,33 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
msk += sizeof(struct nfp_flower_in_port);
if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
- /* Populate Exact MAC Data. */
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
- flow, false);
- /* Populate Mask MAC Data. */
- nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
- flow, true);
+ (struct nfp_flower_mac_mpls *)msk,
+ flow);
ext += sizeof(struct nfp_flower_mac_mpls);
msk += sizeof(struct nfp_flower_mac_mpls);
}
if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
- /* Populate Exact TP Data. */
nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
- flow, false);
- /* Populate Mask TP Data. */
- nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
- flow, true);
+ (struct nfp_flower_tp_ports *)msk,
+ flow);
ext += sizeof(struct nfp_flower_tp_ports);
msk += sizeof(struct nfp_flower_tp_ports);
}
if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
- /* Populate Exact IPv4 Data. */
nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
- flow, false);
- /* Populate Mask IPv4 Data. */
- nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
- flow, true);
+ (struct nfp_flower_ipv4 *)msk,
+ flow);
ext += sizeof(struct nfp_flower_ipv4);
msk += sizeof(struct nfp_flower_ipv4);
}
if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
- /* Populate Exact IPv4 Data. */
nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
- flow, false);
- /* Populate Mask IPv4 Data. */
- nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
- flow, true);
+ (struct nfp_flower_ipv6 *)msk,
+ flow);
ext += sizeof(struct nfp_flower_ipv6);
msk += sizeof(struct nfp_flower_ipv6);
}
@@ -395,10 +403,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
__be32 tun_dst;
- /* Populate Exact VXLAN Data. */
- nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
- /* Populate Mask VXLAN Data. */
- nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
+ nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
ext += sizeof(struct nfp_flower_ipv4_udp_tun);
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
@@ -410,11 +415,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
nfp_tunnel_add_ipv4_off(app, tun_dst);
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
- err = nfp_flower_compile_geneve_opt(ext, flow, false);
- if (err)
- return err;
-
- err = nfp_flower_compile_geneve_opt(msk, flow, true);
+ err = nfp_flower_compile_geneve_opt(ext, msk, flow);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2cdbf29ecbe7..fe1469d201af 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -102,23 +102,22 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
- return dissector_uses_key(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
- dissector_uses_key(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
- dissector_uses_key(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS) ||
- dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
+
+ return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
static int
-nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
+nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
u32 *key_layer_two, int *key_size)
{
- if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+ if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
return -EOPNOTSUPP;
- if (enc_opts->len > 0) {
+ if (enc_opts->key->len > 0) {
*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
*key_size += sizeof(struct nfp_flower_geneve_options);
}
@@ -133,20 +132,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
enum nfp_flower_tun_type *tun_type)
{
- struct flow_dissector_key_basic *mask_basic = NULL;
- struct flow_dissector_key_basic *key_basic = NULL;
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_basic basic = { NULL, NULL };
struct nfp_flower_priv *priv = app->priv;
u32 key_layer_two;
u8 key_layer;
int key_size;
int err;
- if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+ if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
/* If any tun dissector is used then the required set must be used. */
- if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
- (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
!= NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
return -EOPNOTSUPP;
@@ -155,76 +155,53 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_size = sizeof(struct nfp_flower_meta_tci) +
sizeof(struct nfp_flower_in_port);
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
- dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+ flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
key_layer |= NFP_FLOWER_LAYER_MAC;
key_size += sizeof(struct nfp_flower_mac_mpls);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_dissector_key_vlan *flow_vlan;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan vlan;
- flow_vlan = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_VLAN,
- flow->mask);
+ flow_rule_match_vlan(rule, &vlan);
if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
- flow_vlan->vlan_priority)
+ vlan.key->vlan_priority)
return -EOPNOTSUPP;
}
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
- struct flow_dissector_key_ports *mask_enc_ports = NULL;
- struct flow_dissector_key_enc_opts *enc_op = NULL;
- struct flow_dissector_key_ports *enc_ports = NULL;
- struct flow_dissector_key_control *mask_enc_ctl =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- flow->mask);
- struct flow_dissector_key_control *enc_ctl =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_CONTROL,
- flow->key);
-
- if (mask_enc_ctl->addr_type != 0xffff ||
- enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_enc_opts enc_op = { NULL, NULL };
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_control enc_ctl;
+ struct flow_match_ports enc_ports;
+
+ flow_rule_match_enc_control(rule, &enc_ctl);
+
+ if (enc_ctl.mask->addr_type != 0xffff ||
+ enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
return -EOPNOTSUPP;
/* These fields are already verified as used. */
- mask_ipv4 =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
- flow->mask);
- if (mask_ipv4->dst != cpu_to_be32(~0))
+ flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+ if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
return -EOPNOTSUPP;
- mask_enc_ports =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- flow->mask);
- enc_ports =
- skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- flow->key);
- if (mask_enc_ports->dst != cpu_to_be16(~0))
+ flow_rule_match_enc_ports(rule, &enc_ports);
+ if (enc_ports.mask->dst != cpu_to_be16(~0))
return -EOPNOTSUPP;
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_OPTS)) {
- enc_op = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_ENC_OPTS,
- flow->key);
- }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
+ flow_rule_match_enc_opts(rule, &enc_op);
- switch (enc_ports->dst) {
+ switch (enc_ports.key->dst) {
case htons(NFP_FL_VXLAN_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
- if (enc_op)
+ if (enc_op.key)
return -EOPNOTSUPP;
break;
case htons(NFP_FL_GENEVE_PORT):
@@ -236,11 +213,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
- if (!enc_op)
+ if (!enc_op.key)
break;
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
return -EOPNOTSUPP;
- err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
+ err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
&key_size);
if (err)
return err;
@@ -254,19 +231,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
return -EOPNOTSUPP;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- mask_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
+ flow_rule_match_basic(rule, &basic);
- key_basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- flow->key);
- }
-
- if (mask_basic && mask_basic->n_proto) {
+ if (basic.mask && basic.mask->n_proto) {
/* Ethernet type is present in the key. */
- switch (key_basic->n_proto) {
+ switch (basic.key->n_proto) {
case cpu_to_be16(ETH_P_IP):
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
@@ -305,9 +275,9 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
}
- if (mask_basic && mask_basic->ip_proto) {
+ if (basic.mask && basic.mask->ip_proto) {
/* Ethernet type is present in the key. */
- switch (key_basic->ip_proto) {
+ switch (basic.key->ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_SCTP:
@@ -324,14 +294,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
- struct flow_dissector_key_tcp *tcp;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp tcp;
u32 tcp_flags;
- tcp = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_TCP,
- flow->key);
- tcp_flags = be16_to_cpu(tcp->flags);
+ flow_rule_match_tcp(rule, &tcp);
+ tcp_flags = be16_to_cpu(tcp.key->flags);
if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
return -EOPNOTSUPP;
@@ -347,12 +315,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
* space, thus we need to ensure we include a IPv4/IPv6 key
* layer if we have not done so already.
*/
- if (!key_basic)
+ if (!basic.key)
return -EOPNOTSUPP;
if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
!(key_layer & NFP_FLOWER_LAYER_IPV6)) {
- switch (key_basic->n_proto) {
+ switch (basic.key->n_proto) {
case cpu_to_be16(ETH_P_IP):
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
@@ -369,14 +337,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key_ctl;
-
- key_ctl = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_CONTROL,
- flow->key);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control ctl;
- if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+ flow_rule_match_control(rule, &ctl);
+ if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
return -EOPNOTSUPP;
}
@@ -589,9 +554,8 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
- tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
- priv->stats[ctx_id].pkts,
- priv->stats[ctx_id].used);
+ flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
priv->stats[ctx_id].pkts = 0;
priv->stats[ctx_id].bytes = 0;
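
On the stats side the driver now reports into the flow_stats embedded in the offload request instead of poking tcf_exts directly. A hedged sketch of that step; the bytes/pkts/lastused arguments stand in for whatever per-rule counters a driver accumulates:

#include <net/pkt_cls.h>

/* Illustrative stats reporting: hand accumulated hardware counters
 * back to the TC core through the offload request.
 */
static void example_report_stats(struct tc_cls_flower_offload *flow,
				 u64 bytes, u64 pkts, u64 lastused)
{
	flow_stats_update(&flow->stats, bytes, pkts, lastused);
}
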
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 7d2d4241498f..776f6c07701b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -36,7 +36,6 @@
#include <linux/vmalloc.h>
#include <linux/ktime.h>
-#include <net/switchdev.h>
#include <net/vxlan.h>
#include "nfpcore/nfp_nsp.h"
@@ -3531,6 +3530,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_bpf = nfp_net_xdp,
+ .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
};
/**
@@ -3815,8 +3815,6 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->netdev_ops = &nfp_net_netdev_ops;
netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
- SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
-
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 69d7aebda09b..62839807e21e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -5,7 +5,6 @@
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/lockdep.h>
#include <net/dst_metadata.h>
-#include <net/switchdev.h>
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
@@ -273,6 +272,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_fix_features = nfp_repr_fix_features,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
};
void
@@ -336,8 +336,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->max_mtu = pf_netdev->max_mtu;
- SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
-
/* Set features the lower device can support with representors */
if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 86bc149ca231..7e90880fa46b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -31,34 +31,22 @@ struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
return NULL;
}
-static int
-nfp_port_attr_get(struct net_device *netdev, struct switchdev_attr *attr)
+int nfp_port_get_port_parent_id(struct net_device *netdev,
+ struct netdev_phys_item_id *ppid)
{
struct nfp_port *port;
+ const u8 *serial;
port = nfp_port_from_netdev(netdev);
if (!port)
return -EOPNOTSUPP;
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: {
- const u8 *serial;
- /* N.B: attr->u.ppid.id is binary data */
- attr->u.ppid.id_len = nfp_cpp_serial(port->app->cpp, &serial);
- memcpy(&attr->u.ppid.id, serial, attr->u.ppid.id_len);
- break;
- }
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = nfp_cpp_serial(port->app->cpp, &serial);
+ memcpy(&ppid->id, serial, ppid->id_len);
return 0;
}
-const struct switchdev_ops nfp_port_switchdev_ops = {
- .switchdev_port_attr_get = nfp_port_attr_get,
-};
-
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index b2479a2a49e5..90ae053f5c07 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -7,6 +7,7 @@
#include <net/devlink.h>
struct net_device;
+struct netdev_phys_item_id;
struct nfp_app;
struct nfp_pf;
struct nfp_port;
@@ -90,7 +91,6 @@ struct nfp_port {
};
extern const struct ethtool_ops nfp_port_ethtool_ops;
-extern const struct switchdev_ops nfp_port_switchdev_ops;
__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...);
@@ -106,6 +106,8 @@ int
nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
+int nfp_port_get_port_parent_id(struct net_device *netdev,
+ struct netdev_phys_item_id *ppid);
struct nfp_port *
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port);
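
Several drivers in this series (nfp here, rocker and netdevsim below) drop switchdev_port_attr_get() in favour of the dedicated ndo_get_port_parent_id() netdev op. A minimal sketch of the pattern; the private structure and the 8-byte switch identifier are illustrative, not taken from any of these drivers:

#include <linux/netdevice.h>
#include <linux/string.h>

struct example_priv {
	u8 switch_id[8];	/* hypothetical per-ASIC identifier */
};

static int example_get_port_parent_id(struct net_device *dev,
				      struct netdev_phys_item_id *ppid)
{
	struct example_priv *priv = netdev_priv(dev);

	ppid->id_len = sizeof(priv->switch_id);
	memcpy(&ppid->id, priv->switch_id, ppid->id_len);
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_port_parent_id	= example_get_port_parent_id,
};

All ports of the same ASIC report the same identifier; that is what the core uses to group them into one switch.
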
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 73a98bd2fcd2..96f7a9818294 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1282,6 +1282,7 @@ static int nixge_of_get_resources(struct platform_device *pdev)
static int nixge_probe(struct platform_device *pdev)
{
+ struct device_node *mn, *phy_node;
struct nixge_priv *priv;
struct net_device *ndev;
const u8 *mac_addr;
@@ -1335,10 +1336,14 @@ static int nixge_probe(struct platform_device *pdev)
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- err = nixge_mdio_setup(priv, pdev->dev.of_node);
- if (err) {
- netdev_err(ndev, "error registering mdio bus");
- goto free_netdev;
+ mn = of_get_child_by_name(pdev->dev.of_node, "mdio");
+ if (mn) {
+ err = nixge_mdio_setup(priv, mn);
+ of_node_put(mn);
+ if (err) {
+ netdev_err(ndev, "error registering mdio bus");
+ goto free_netdev;
+ }
}
priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
@@ -1348,23 +1353,33 @@ static int nixge_probe(struct platform_device *pdev)
goto unregister_mdio;
}
- priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (!priv->phy_node) {
- netdev_err(ndev, "not find \"phy-handle\" property\n");
- err = -EINVAL;
- goto unregister_mdio;
+ phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(pdev->dev.of_node)) {
+ err = of_phy_register_fixed_link(pdev->dev.of_node);
+ if (err < 0) {
+ netdev_err(ndev, "broken fixed-link specification\n");
+ goto unregister_mdio;
+ }
+ phy_node = of_node_get(pdev->dev.of_node);
}
+ priv->phy_node = phy_node;
err = register_netdev(priv->ndev);
if (err) {
netdev_err(ndev, "register_netdev() error (%i)\n", err);
- goto unregister_mdio;
+ goto free_phy;
}
return 0;
+free_phy:
+ if (of_phy_is_fixed_link(pdev->dev.of_node))
+ of_phy_deregister_fixed_link(pdev->dev.of_node);
+ of_node_put(phy_node);
+
unregister_mdio:
- mdiobus_unregister(priv->mii_bus);
+ if (priv->mii_bus)
+ mdiobus_unregister(priv->mii_bus);
free_netdev:
free_netdev(ndev);
@@ -1379,7 +1394,12 @@ static int nixge_remove(struct platform_device *pdev)
unregister_netdev(ndev);
- mdiobus_unregister(priv->mii_bus);
+ if (of_phy_is_fixed_link(pdev->dev.of_node))
+ of_phy_deregister_fixed_link(pdev->dev.of_node);
+ of_node_put(priv->phy_node);
+
+ if (priv->mii_bus)
+ mdiobus_unregister(priv->mii_bus);
free_netdev(ndev);
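
The nixge probe now treats both the MDIO child node and the phy-handle as optional so that a fixed-link description is enough. A condensed sketch of the lookup order, assuming the usual of_mdio helpers; the helper name is illustrative and error reporting is trimmed:

#include <linux/of.h>
#include <linux/of_mdio.h>

/* Illustrative probe fragment: prefer "phy-handle", otherwise register
 * a fixed link described by a "fixed-link" subnode.
 */
static struct device_node *example_get_phy_node(struct device_node *dn)
{
	struct device_node *phy_node;

	phy_node = of_parse_phandle(dn, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(dn)) {
		if (of_phy_register_fixed_link(dn) < 0)
			return NULL;
		/* for a fixed link the "PHY node" is the MAC node itself */
		phy_node = of_node_get(dn);
	}

	return phy_node;
}

The matching teardown, as in nixge_remove() above, is of_phy_deregister_fixed_link() plus of_node_put() on whatever node was taken.
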
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index b16ce7d93caf..add922b93d2c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1665,198 +1665,6 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
return 0;
}
-static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- if ((fs->h_u.tcp_ip4_spec.ip4src &
- fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) {
- DP_INFO(edev, "Don't support IP-masks\n");
- return -EOPNOTSUPP;
- }
-
- if ((fs->h_u.tcp_ip4_spec.ip4dst &
- fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) {
- DP_INFO(edev, "Don't support IP-masks\n");
- return -EOPNOTSUPP;
- }
-
- if ((fs->h_u.tcp_ip4_spec.psrc &
- fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) {
- DP_INFO(edev, "Don't support port-masks\n");
- return -EOPNOTSUPP;
- }
-
- if ((fs->h_u.tcp_ip4_spec.pdst &
- fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) {
- DP_INFO(edev, "Don't support port-masks\n");
- return -EOPNOTSUPP;
- }
-
- if (fs->h_u.tcp_ip4_spec.tos) {
- DP_INFO(edev, "Don't support tos\n");
- return -EOPNOTSUPP;
- }
-
- t->eth_proto = htons(ETH_P_IP);
- t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
- t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
- t->src_port = fs->h_u.tcp_ip4_spec.psrc;
- t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
-
- return qede_set_v4_tuple_to_profile(edev, t);
-}
-
-static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- t->ip_proto = IPPROTO_TCP;
-
- if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
- return -EINVAL;
-
- return 0;
-}
-
-static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- t->ip_proto = IPPROTO_UDP;
-
- if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
- return -EINVAL;
-
- return 0;
-}
-
-static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- struct in6_addr zero_addr;
-
- memset(&zero_addr, 0, sizeof(zero_addr));
-
- if ((fs->h_u.tcp_ip6_spec.psrc &
- fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
- DP_INFO(edev, "Don't support port-masks\n");
- return -EOPNOTSUPP;
- }
-
- if ((fs->h_u.tcp_ip6_spec.pdst &
- fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) {
- DP_INFO(edev, "Don't support port-masks\n");
- return -EOPNOTSUPP;
- }
-
- if (fs->h_u.tcp_ip6_spec.tclass) {
- DP_INFO(edev, "Don't support tclass\n");
- return -EOPNOTSUPP;
- }
-
- t->eth_proto = htons(ETH_P_IPV6);
- memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
- sizeof(struct in6_addr));
- memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst,
- sizeof(struct in6_addr));
- t->src_port = fs->h_u.tcp_ip6_spec.psrc;
- t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
-
- return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
-}
-
-static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- t->ip_proto = IPPROTO_TCP;
-
- if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
- return -EINVAL;
-
- return 0;
-}
-
-static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- t->ip_proto = IPPROTO_UDP;
-
- if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
- return -EINVAL;
-
- return 0;
-}
-
-static int qede_flow_spec_to_tuple(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct ethtool_rx_flow_spec *fs)
-{
- memset(t, 0, sizeof(*t));
-
- if (qede_flow_spec_validate_unused(edev, fs))
- return -EOPNOTSUPP;
-
- switch ((fs->flow_type & ~FLOW_EXT)) {
- case TCP_V4_FLOW:
- return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
- case UDP_V4_FLOW:
- return qede_flow_spec_to_tuple_udpv4(edev, t, fs);
- case TCP_V6_FLOW:
- return qede_flow_spec_to_tuple_tcpv6(edev, t, fs);
- case UDP_V6_FLOW:
- return qede_flow_spec_to_tuple_udpv6(edev, t, fs);
- default:
- DP_VERBOSE(edev, NETIF_MSG_IFUP,
- "Can't support flow of type %08x\n", fs->flow_type);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int qede_flow_spec_validate(struct qede_dev *edev,
- struct ethtool_rx_flow_spec *fs,
- struct qede_arfs_tuple *t)
-{
- if (fs->location >= QEDE_RFS_MAX_FLTR) {
- DP_INFO(edev, "Location out-of-bounds\n");
- return -EINVAL;
- }
-
- /* Check location isn't already in use */
- if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) {
- DP_INFO(edev, "Location already in use\n");
- return -EINVAL;
- }
-
- /* Check if the filtering-mode could support the filter */
- if (edev->arfs->filter_count &&
- edev->arfs->mode != t->mode) {
- DP_INFO(edev,
- "flow_spec would require filtering mode %08x, but %08x is configured\n",
- t->mode, edev->arfs->filter_count);
- return -EINVAL;
- }
-
- /* If drop requested then no need to validate other data */
- if (fs->ring_cookie == RX_CLS_FLOW_DISC)
- return 0;
-
- if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
- return 0;
-
- if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
- DP_INFO(edev, "Queue out-of-bounds\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/* Must be called while qede lock is held */
static struct qede_arfs_fltr_node *
qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
@@ -1896,72 +1704,6 @@ static void qede_flow_set_destination(struct qede_dev *edev,
"Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}
-int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
-{
- struct ethtool_rx_flow_spec *fsp = &info->fs;
- struct qede_arfs_fltr_node *n;
- struct qede_arfs_tuple t;
- int min_hlen, rc;
-
- __qede_lock(edev);
-
- if (!edev->arfs) {
- rc = -EPERM;
- goto unlock;
- }
-
- /* Translate the flow specification into something fittign our DB */
- rc = qede_flow_spec_to_tuple(edev, &t, fsp);
- if (rc)
- goto unlock;
-
- /* Make sure location is valid and filter isn't already set */
- rc = qede_flow_spec_validate(edev, fsp, &t);
- if (rc)
- goto unlock;
-
- if (qede_flow_find_fltr(edev, &t)) {
- rc = -EINVAL;
- goto unlock;
- }
-
- n = kzalloc(sizeof(*n), GFP_KERNEL);
- if (!n) {
- rc = -ENOMEM;
- goto unlock;
- }
-
- min_hlen = qede_flow_get_min_header_size(&t);
- n->data = kzalloc(min_hlen, GFP_KERNEL);
- if (!n->data) {
- kfree(n);
- rc = -ENOMEM;
- goto unlock;
- }
-
- n->sw_id = fsp->location;
- set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
- n->buf_len = min_hlen;
-
- memcpy(&n->tuple, &t, sizeof(n->tuple));
-
- qede_flow_set_destination(edev, n, fsp);
-
- /* Build a minimal header according to the flow */
- n->tuple.build_hdr(&n->tuple, n->data);
-
- rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
- if (rc)
- goto unlock;
-
- qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
- rc = qede_poll_arfs_filter_config(edev, n);
-unlock:
- __qede_unlock(edev);
-
- return rc;
-}
-
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
struct qede_arfs_fltr_node *fltr = NULL;
@@ -2004,190 +1746,172 @@ unlock:
}
static int qede_parse_actions(struct qede_dev *edev,
- struct tcf_exts *exts)
+ struct flow_action *flow_action)
{
- int rc = -EINVAL, num_act = 0, i;
- const struct tc_action *a;
- bool is_drop = false;
+ const struct flow_action_entry *act;
+ int i;
- if (!tcf_exts_has_actions(exts)) {
- DP_NOTICE(edev, "No tc actions received\n");
- return rc;
+ if (!flow_action_has_entries(flow_action)) {
+ DP_NOTICE(edev, "No actions received\n");
+ return -EINVAL;
}
- tcf_exts_for_each_action(i, a, exts) {
- num_act++;
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ break;
+ case FLOW_ACTION_QUEUE:
+ if (act->queue.vf)
+ break;
- if (is_tcf_gact_shot(a))
- is_drop = true;
+ if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
+ DP_INFO(edev, "Queue out-of-bounds\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
}
- if (num_act == 1 && is_drop)
- return 0;
-
- return rc;
+ return 0;
}
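
Action parsing likewise moves off tcf_exts onto the intermediate flow_action representation, which is what later lets qede_flow_spec_validate() reuse the same validator for ethtool rules. A minimal sketch of the iteration idiom with an illustrative accept/reject policy (drop, or redirect to an in-range queue):

#include <linux/errno.h>
#include <net/flow_offload.h>

static int example_parse_actions(struct flow_action *flow_action,
				 unsigned int max_queues)
{
	const struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			break;
		case FLOW_ACTION_QUEUE:
			/* VF redirects are resolved elsewhere in this sketch */
			if (!act->queue.vf && act->queue.index >= max_queues)
				return -EINVAL;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
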
static int
-qede_tc_parse_ports(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
- struct qede_arfs_tuple *t)
-{
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
- struct flow_dissector_key_ports *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_PORTS,
- f->mask);
-
- if ((key->src && mask->src != U16_MAX) ||
- (key->dst && mask->dst != U16_MAX)) {
+qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *t)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if ((match.key->src && match.mask->src != U16_MAX) ||
+ (match.key->dst && match.mask->dst != U16_MAX)) {
DP_NOTICE(edev, "Do not support ports masks\n");
return -EINVAL;
}
- t->src_port = key->src;
- t->dst_port = key->dst;
+ t->src_port = match.key->src;
+ t->dst_port = match.key->dst;
}
return 0;
}
static int
-qede_tc_parse_v6_common(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
- struct qede_arfs_tuple *t)
+qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *t)
{
struct in6_addr zero_addr, addr;
memset(&zero_addr, 0, sizeof(addr));
memset(&addr, 0xff, sizeof(addr));
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
- struct flow_dissector_key_ipv6_addrs *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV6_ADDRS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
- if ((memcmp(&key->src, &zero_addr, sizeof(addr)) &&
- memcmp(&mask->src, &addr, sizeof(addr))) ||
- (memcmp(&key->dst, &zero_addr, sizeof(addr)) &&
- memcmp(&mask->dst, &addr, sizeof(addr)))) {
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
+ memcmp(&match.mask->src, &addr, sizeof(addr))) ||
+ (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
+ memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
DP_NOTICE(edev,
"Do not support IPv6 address prefix/mask\n");
return -EINVAL;
}
- memcpy(&t->src_ipv6, &key->src, sizeof(addr));
- memcpy(&t->dst_ipv6, &key->dst, sizeof(addr));
+ memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
+ memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
}
- if (qede_tc_parse_ports(edev, f, t))
+ if (qede_flow_parse_ports(edev, rule, t))
return -EINVAL;
return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}
static int
-qede_tc_parse_v4_common(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *t)
{
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- struct flow_dissector_key_ipv4_addrs *key, *mask;
-
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->key);
- mask = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- f->mask);
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
- if ((key->src && mask->src != U32_MAX) ||
- (key->dst && mask->dst != U32_MAX)) {
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if ((match.key->src && match.mask->src != U32_MAX) ||
+ (match.key->dst && match.mask->dst != U32_MAX)) {
DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
return -EINVAL;
}
- t->src_ipv4 = key->src;
- t->dst_ipv4 = key->dst;
+ t->src_ipv4 = match.key->src;
+ t->dst_ipv4 = match.key->dst;
}
- if (qede_tc_parse_ports(edev, f, t))
+ if (qede_flow_parse_ports(edev, rule, t))
return -EINVAL;
return qede_set_v4_tuple_to_profile(edev, t);
}
static int
-qede_tc_parse_tcp_v6(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_tc_parse_v6_common(edev, f, tuple);
+ return qede_flow_parse_v6_common(edev, rule, tuple);
}
static int
-qede_tc_parse_tcp_v4(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_tc_parse_v4_common(edev, f, tuple);
+ return qede_flow_parse_v4_common(edev, rule, tuple);
}
static int
-qede_tc_parse_udp_v6(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_tc_parse_v6_common(edev, f, tuple);
+ return qede_flow_parse_v6_common(edev, rule, tuple);
}
static int
-qede_tc_parse_udp_v4(struct qede_dev *edev,
- struct tc_cls_flower_offload *f,
+qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
struct qede_arfs_tuple *tuple)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_tc_parse_v4_common(edev, f, tuple);
+ return qede_flow_parse_v4_common(edev, rule, tuple);
}
static int
-qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
- struct tc_cls_flower_offload *f,
- struct qede_arfs_tuple *tuple)
+qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
+ struct flow_rule *rule, struct qede_arfs_tuple *tuple)
{
+ struct flow_dissector *dissector = rule->match.dissector;
int rc = -EINVAL;
u8 ip_proto = 0;
memset(tuple, 0, sizeof(*tuple));
- if (f->dissector->used_keys &
+ if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
DP_NOTICE(edev, "Unsupported key set:0x%x\n",
- f->dissector->used_keys);
+ dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -2197,25 +1921,23 @@ qede_parse_flower_attr(struct qede_dev *edev, __be16 proto,
return -EPROTONOSUPPORT;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
- key = skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- f->key);
- ip_proto = key->ip_proto;
+ flow_rule_match_basic(rule, &match);
+ ip_proto = match.key->ip_proto;
}
if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
- rc = qede_tc_parse_tcp_v4(edev, f, tuple);
+ rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
- rc = qede_tc_parse_tcp_v6(edev, f, tuple);
+ rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
- rc = qede_tc_parse_udp_v4(edev, f, tuple);
+ rc = qede_flow_parse_udp_v4(edev, rule, tuple);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
- rc = qede_tc_parse_udp_v6(edev, f, tuple);
+ rc = qede_flow_parse_udp_v6(edev, rule, tuple);
else
- DP_NOTICE(edev, "Invalid tc protocol request\n");
+ DP_NOTICE(edev, "Invalid protocol request\n");
return rc;
}
@@ -2235,7 +1957,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- if (qede_parse_flower_attr(edev, proto, f, &t))
+ if (qede_parse_flow_attr(edev, proto, f->rule, &t))
goto unlock;
/* Validate profile mode and number of filters */
@@ -2248,7 +1970,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, f->exts))
+ if (qede_parse_actions(edev, &f->rule->action))
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -2290,3 +2012,141 @@ unlock:
__qede_unlock(edev);
return rc;
}
+
+static int qede_flow_spec_validate(struct qede_dev *edev,
+ struct flow_action *flow_action,
+ struct qede_arfs_tuple *t,
+ __u32 location)
+{
+ if (location >= QEDE_RFS_MAX_FLTR) {
+ DP_INFO(edev, "Location out-of-bounds\n");
+ return -EINVAL;
+ }
+
+ /* Check location isn't already in use */
+ if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
+ DP_INFO(edev, "Location already in use\n");
+ return -EINVAL;
+ }
+
+ /* Check if the filtering-mode could support the filter */
+ if (edev->arfs->filter_count &&
+ edev->arfs->mode != t->mode) {
+ DP_INFO(edev,
+ "flow_spec would require filtering mode %08x, but %08x is configured\n",
+ t->mode, edev->arfs->mode);
+ return -EINVAL;
+ }
+
+ if (qede_parse_actions(edev, flow_action))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qede_flow_spec_to_rule(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_rx_flow_spec_input input = {};
+ struct ethtool_rx_flow_rule *flow;
+ __be16 proto;
+ int err = 0;
+
+ if (qede_flow_spec_validate_unused(edev, fs))
+ return -EOPNOTSUPP;
+
+ switch ((fs->flow_type & ~FLOW_EXT)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ proto = htons(ETH_P_IP);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Can't support flow of type %08x\n", fs->flow_type);
+ return -EOPNOTSUPP;
+ }
+
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ /* Make sure location is valid and filter isn't already set */
+ err = qede_flow_spec_validate(edev, &flow->rule->action, t,
+ fs->location);
+err_out:
+ ethtool_rx_flow_rule_destroy(flow);
+ return err;
+
+}
+
+int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct ethtool_rx_flow_spec *fsp = &info->fs;
+ struct qede_arfs_fltr_node *n;
+ struct qede_arfs_tuple t;
+ int min_hlen, rc;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ /* Translate the flow specification into something fitting our DB */
+ rc = qede_flow_spec_to_rule(edev, &t, fsp);
+ if (rc)
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ min_hlen = qede_flow_get_min_header_size(&t);
+ n->data = kzalloc(min_hlen, GFP_KERNEL);
+ if (!n->data) {
+ kfree(n);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ n->sw_id = fsp->location;
+ set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
+ n->buf_len = min_hlen;
+
+ memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+ qede_flow_set_destination(edev, n, fsp);
+
+ /* Build a minimal header according to the flow */
+ n->tuple.build_hdr(&n->tuple, n->data);
+
+ rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+ if (rc)
+ goto unlock;
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+ rc = qede_poll_arfs_filter_config(edev, n);
+unlock:
+ __qede_unlock(edev);
+
+ return rc;
+}
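
The net effect of the qede rework: ethtool n-tuple rules no longer have a private parser, they are translated into the same flow_rule the flower path consumes. A hedged sketch of that translation step; the parse_rule callback stands in for qede_parse_flow_attr() and is not a real API:

#include <linux/err.h>
#include <linux/ethtool.h>
#include <net/flow_offload.h>

/* Illustrative wrapper: build a flow_rule view of an ethtool flow
 * spec, hand it to a flower-style parser, then release it.
 */
static int example_spec_to_rule(struct ethtool_rx_flow_spec *fs,
				int (*parse_rule)(struct flow_rule *rule))
{
	struct ethtool_rx_flow_spec_input input = { .fs = fs };
	struct ethtool_rx_flow_rule *flow;
	int err;

	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	err = parse_rule(flow->rule);

	ethtool_rx_flow_rule_destroy(flow);
	return err;
}

The flow_rule only needs to live for the duration of parsing; the driver keeps its own qede_arfs_tuple, which is why the rule is destroyed before returning.
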
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e8a112149a62..1dd72137fd53 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7110,6 +7110,30 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
}
+static void rtl_read_mac_address(struct rtl8169_private *tp,
+ u8 mac_addr[ETH_ALEN])
+{
+ u32 value;
+
+ /* Get MAC address */
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ value = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
+ mac_addr[0] = (value >> 0) & 0xff;
+ mac_addr[1] = (value >> 8) & 0xff;
+ mac_addr[2] = (value >> 16) & 0xff;
+ mac_addr[3] = (value >> 24) & 0xff;
+
+ value = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
+ mac_addr[4] = (value >> 0) & 0xff;
+ mac_addr[5] = (value >> 8) & 0xff;
+ break;
+ default:
+ break;
+ }
+}
+
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
return RTL_R8(tp, MCU) & LINK_LIST_RDY;
@@ -7301,6 +7325,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
+ /* align to u16 for is_valid_ether_addr() */
+ u8 mac_addr[ETH_ALEN] __aligned(2) = {};
struct rtl8169_private *tp;
struct net_device *dev;
int chipset, region, i;
@@ -7403,20 +7429,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u64_stats_init(&tp->rx_stats.syncp);
u64_stats_init(&tp->tx_stats.syncp);
- /* Get MAC address */
- switch (tp->mac_version) {
- u8 mac_addr[ETH_ALEN] __aligned(4);
- case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
- *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
+ /* get MAC address */
+ rc = eth_platform_get_mac_address(&pdev->dev, mac_addr);
+ if (rc)
+ rtl_read_mac_address(tp, mac_addr);
+
+ if (is_valid_ether_addr(mac_addr))
+ rtl_rar_set(tp, mac_addr);
- if (is_valid_ether_addr(mac_addr))
- rtl_rar_set(tp, mac_addr);
- break;
- default:
- break;
- }
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
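
For r8169 the MAC address setup becomes layered: a platform/firmware-provided address (device tree, ACPI) takes precedence, the chip-specific ERI read is only a fallback, and the result is programmed only if it validates. A short sketch of that precedence; read_from_chip() is an illustrative stand-in for rtl_read_mac_address():

#include <linux/device.h>
#include <linux/etherdevice.h>

/* Illustrative helper: prefer a firmware-provided address, fall back
 * to reading it from the NIC, and report whether it is usable.
 */
static bool example_init_mac(struct device *dev, u8 addr[ETH_ALEN],
			     void (*read_from_chip)(u8 *addr))
{
	if (eth_platform_get_mac_address(dev, addr))
		read_from_chip(addr);

	return is_valid_ether_addr(addr);
}

The __aligned(2) on the on-stack buffer above is not cosmetic: is_valid_ether_addr() reads the address as 16-bit words.
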
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f27a0dc8c563..339b2eae2100 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -555,7 +555,7 @@ static int sh_eth_soft_reset_gether(struct net_device *ndev)
sh_eth_write(ndev, 0, RDFFR);
/* Reset HW CRC register */
- if (mdp->cd->hw_checksum)
+ if (mdp->cd->csmr)
sh_eth_write(ndev, 0, CSMR);
/* Select MII mode */
@@ -619,7 +619,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
- .hw_checksum = 1,
+ .csmr = 1,
+ .rx_csum = 1,
.tsu = 1,
.no_tx_cntrs = 1,
};
@@ -668,7 +669,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
- .hw_checksum = 1,
+ .csmr = 1,
+ .rx_csum = 1,
.tsu = 1,
.select_mii = 1,
.magic = 1,
@@ -793,7 +795,8 @@ static struct sh_eth_cpu_data r8a77980_data = {
.no_trimd = 1,
.no_ade = 1,
.xdfar_rw = 1,
- .hw_checksum = 1,
+ .csmr = 1,
+ .rx_csum = 1,
.select_mii = 1,
.magic = 1,
.cexcr = 1,
@@ -1045,7 +1048,8 @@ static struct sh_eth_cpu_data sh7734_data = {
.no_ade = 1,
.xdfar_rw = 1,
.tsu = 1,
- .hw_checksum = 1,
+ .csmr = 1,
+ .rx_csum = 1,
.select_mii = 1,
.magic = 1,
.cexcr = 1,
@@ -1088,6 +1092,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.irq_flags = IRQF_SHARED,
.magic = 1,
.cexcr = 1,
+ .rx_csum = 1,
.dual_port = 1,
};
@@ -1532,8 +1537,9 @@ static int sh_eth_dev_init(struct net_device *ndev)
mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- /* PAUSE Prohibition */
+ /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
+ (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
if (mdp->cd->set_rate)
@@ -1592,6 +1598,19 @@ static void sh_eth_dev_exit(struct net_device *ndev)
update_mac_address(ndev);
}
+static void sh_eth_rx_csum(struct sk_buff *skb)
+{
+ u8 *hw_csum;
+
+ /* The hardware checksum is 2 bytes appended to packet data */
+ if (unlikely(skb->len < sizeof(__sum16)))
+ return;
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb_trim(skb, skb->len - sizeof(__sum16));
+}
+
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
@@ -1633,7 +1652,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
* the RFS bits are from bit 25 to bit 16. So, the
* driver needs right shifting by 16.
*/
- if (mdp->cd->hw_checksum)
+ if (mdp->cd->csmr)
desc_status >>= 16;
skb = mdp->rx_skbuff[entry];
@@ -1666,6 +1685,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ sh_eth_rx_csum(skb);
netif_receive_skb(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
@@ -2173,7 +2194,7 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(MAFCR);
if (cd->rtrate)
add_reg(RTRATE);
- if (cd->hw_checksum)
+ if (cd->csmr)
add_reg(CSMR);
if (cd->select_mii)
add_reg(RMII_MII);
@@ -2921,6 +2942,39 @@ static void sh_eth_set_rx_mode(struct net_device *ndev)
spin_unlock_irqrestore(&mdp->lock, flags);
}
+static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+
+ /* Disable TX and RX */
+ sh_eth_rcv_snd_disable(ndev);
+
+ /* Modify RX Checksum setting */
+ sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
+
+ /* Enable TX and RX */
+ sh_eth_rcv_snd_enable(ndev);
+
+ spin_unlock_irqrestore(&mdp->lock, flags);
+}
+
+static int sh_eth_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
+ sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
+
+ ndev->features = features;
+
+ return 0;
+}
+
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
if (!mdp->port)
@@ -3102,6 +3156,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = sh_eth_set_features,
};
static const struct net_device_ops sh_eth_netdev_ops_tsu = {
@@ -3117,6 +3172,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_change_mtu = sh_eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = sh_eth_set_features,
};
#ifdef CONFIG_OF
@@ -3245,6 +3301,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
+ if (mdp->cd->rx_csum) {
+ ndev->features = NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_RXCSUM;
+ }
+
/* set function */
if (mdp->cd->tsu)
ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
@@ -3294,7 +3355,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
mdp->port = port;
- ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
/* Need to init only the first port of the two sharing a TSU */
if (port == 0) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 0c18650bbfe6..850726301e1c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -499,7 +499,8 @@ struct sh_eth_cpu_data {
unsigned no_ade:1; /* E-DMAC DOES NOT have ADE bit in EESR */
unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */
unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */
- unsigned hw_checksum:1; /* E-DMAC has CSMR */
+ unsigned csmr:1; /* E-DMAC has CSMR */
+ unsigned rx_csum:1; /* EtherC has ECMR.RCSC */
unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */
unsigned rmiimode:1; /* EtherC has RMIIMODE register */
unsigned rtrate:1; /* EtherC has RTRATE register */
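
The sh_eth RX checksum support relies on the EtherC appending the raw 16-bit sum of the received frame when ECMR.RCSC is set; the driver reports it as CHECKSUM_COMPLETE and trims the two-byte trailer before handing the skb up. A hedged, driver-independent sketch of that trailer handling, mirroring the sh_eth_rx_csum() helper added above:

#include <linux/skbuff.h>
#include <net/checksum.h>
#include <asm/unaligned.h>

/* Illustrative trailer handling: hardware appends the 16-bit
 * one's-complement sum of the frame after the packet data.
 */
static void example_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	if (unlikely(skb->len < sizeof(__sum16)))
		return;

	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}

CHECKSUM_COMPLETE means the stack itself verifies any L4 checksum against skb->csum, so the hardware needs no protocol awareness; it only has to sum the bytes.
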
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 62a205eba9f7..66f72f8c46e5 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2026,6 +2026,18 @@ static void rocker_port_neigh_destroy(struct net_device *dev,
err);
}
+static int rocker_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct rocker *rocker = rocker_port->rocker;
+
+ ppid->id_len = sizeof(rocker->hw.id);
+ memcpy(&ppid->id, &rocker->hw.id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_open = rocker_port_open,
.ndo_stop = rocker_port_stop,
@@ -2035,6 +2047,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
.ndo_change_proto_down = rocker_port_change_proto_down,
.ndo_neigh_destroy = rocker_port_neigh_destroy,
+ .ndo_get_port_parent_id = rocker_port_get_port_parent_id,
};
/********************
@@ -2045,14 +2058,9 @@ static int rocker_port_attr_get(struct net_device *dev,
struct switchdev_attr *attr)
{
const struct rocker_port *rocker_port = netdev_priv(dev);
- const struct rocker *rocker = rocker_port->rocker;
int err = 0;
switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(rocker->hw.id);
- memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
- break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = rocker_world_port_attr_bridge_flags_get(rocker_port,
&attr->u.brport_flags);
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 8d8e2b3f263e..75a50b59cb8f 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -22,7 +22,6 @@
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
-#include <net/switchdev.h>
#include "netdevsim.h"
@@ -148,26 +147,16 @@ static struct device_type nsim_dev_type = {
.release = nsim_dev_release,
};
-static int
-nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int nsim_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct netdevsim *ns = netdev_priv(dev);
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(ns->sdev->switch_id);
- memcpy(&attr->u.ppid.id, &ns->sdev->switch_id,
- attr->u.ppid.id_len);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ ppid->id_len = sizeof(ns->sdev->switch_id);
+ memcpy(&ppid->id, &ns->sdev->switch_id, ppid->id_len);
+ return 0;
}
-static const struct switchdev_ops nsim_switchdev_ops = {
- .switchdev_port_attr_get = nsim_port_attr_get,
-};
-
static int nsim_init(struct net_device *dev)
{
char sdev_ddir_name[10], sdev_link_name[32];
@@ -214,7 +203,6 @@ static int nsim_init(struct net_device *dev)
goto err_bpf_uninit;
SET_NETDEV_DEV(dev, &ns->dev);
- SWITCHDEV_SET_OPS(dev, &nsim_switchdev_ops);
err = nsim_devlink_setup(ns);
if (err)
@@ -493,6 +481,7 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
+ .ndo_get_port_parent_id = nsim_get_port_parent_id,
};
static void nsim_setup(struct net_device *dev)
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8a8d9f606b3e..fc09c5c1a4de 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -127,17 +127,13 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev)
{
struct dp83867_private *dp83867 =
(struct dp83867_private *)phydev->priv;
- u16 val;
-
- val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN)
- val |= DP83867_CFG4_PORT_MIRROR_EN;
+ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ DP83867_CFG4_PORT_MIRROR_EN);
else
- val &= ~DP83867_CFG4_PORT_MIRROR_EN;
-
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
-
+ phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ DP83867_CFG4_PORT_MIRROR_EN);
return 0;
}
@@ -222,11 +218,9 @@ static int dp83867_config_init(struct phy_device *phydev)
}
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
- if (dp83867->rxctrl_strap_quirk) {
- val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
- val &= ~BIT(7);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
- }
+ if (dp83867->rxctrl_strap_quirk)
+ phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ BIT(7));
if (phy_interface_is_rgmii(phydev)) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
@@ -275,17 +269,11 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
delay);
- if (dp83867->io_impedance >= 0) {
- val = phy_read_mmd(phydev, DP83867_DEVADDR,
- DP83867_IO_MUX_CFG);
-
- val &= ~DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
- val |= dp83867->io_impedance &
- DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL;
-
- phy_write_mmd(phydev, DP83867_DEVADDR,
- DP83867_IO_MUX_CFG, val);
- }
+ if (dp83867->io_impedance >= 0)
+ phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+ dp83867->io_impedance &
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
}
/* Enable Interrupt output INT_OE in CFG3 register */
@@ -299,12 +287,11 @@ static int dp83867_config_init(struct phy_device *phydev)
dp83867_config_port_mirroring(phydev);
/* Clock output selection if muxing property is set */
- if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK) {
- val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG);
- val &= ~DP83867_IO_MUX_CFG_CLK_O_SEL_MASK;
- val |= (dp83867->clk_output_sel << DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, val);
- }
+ if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK)
+ phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
+ DP83867_IO_MUX_CFG_CLK_O_SEL_MASK,
+ dp83867->clk_output_sel <<
+ DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT);
return 0;
}
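
The PHY driver cleanups here and in dp83tc811.c below replace open-coded read/modify/write sequences on MMD registers with the phy_set_bits_mmd()/phy_clear_bits_mmd()/phy_modify_mmd() helpers, which are shorter and keep the read-modify-write consistent on the MDIO bus. A sketch of the three forms; the device address, register and bit definitions are made up for illustration:

#include <linux/bits.h>
#include <linux/phy.h>

#define EXAMPLE_DEVAD		MDIO_MMD_VEND2
#define EXAMPLE_REG		0x0170
#define EXAMPLE_EN		BIT(7)
#define EXAMPLE_SEL_MASK	GENMASK(1, 0)

static int example_config(struct phy_device *phydev, bool enable, u8 sel)
{
	int ret;

	if (enable)
		ret = phy_set_bits_mmd(phydev, EXAMPLE_DEVAD, EXAMPLE_REG,
				       EXAMPLE_EN);
	else
		ret = phy_clear_bits_mmd(phydev, EXAMPLE_DEVAD, EXAMPLE_REG,
					 EXAMPLE_EN);
	if (ret < 0)
		return ret;

	/* modify: new = (old & ~mask) | set */
	return phy_modify_mmd(phydev, EXAMPLE_DEVAD, EXAMPLE_REG,
			      EXAMPLE_SEL_MASK, sel & EXAMPLE_SEL_MASK);
}
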
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index da13356999e5..e9704af1d239 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -144,11 +144,8 @@ static int dp83811_set_wol(struct phy_device *phydev,
phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
value);
} else {
- value = phy_read_mmd(phydev, DP83811_DEVADDR,
- MII_DP83811_WOL_CFG);
- value &= ~DP83811_WOL_EN;
- phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
- value);
+ phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+ DP83811_WOL_EN);
}
return 0;
@@ -328,14 +325,10 @@ static int dp83811_suspend(struct phy_device *phydev)
static int dp83811_resume(struct phy_device *phydev)
{
- int value;
-
genphy_resume(phydev);
- value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG);
-
- phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, value |
- DP83811_WOL_CLR_INDICATION);
+ phy_set_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+ DP83811_WOL_CLR_INDICATION);
return 0;
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 47a8cb574c45..f136a23c1a35 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -18,7 +18,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/seqlock.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
@@ -38,7 +38,7 @@ struct fixed_phy {
bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
- int link_gpio;
+ struct gpio_desc *link_gpiod;
};
static struct platform_device *pdev;
@@ -67,8 +67,8 @@ EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
static void fixed_phy_update(struct fixed_phy *fp)
{
- if (!fp->no_carrier && gpio_is_valid(fp->link_gpio))
- fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
+ if (!fp->no_carrier && fp->link_gpiod)
+ fp->status.link = !!gpiod_get_value_cansleep(fp->link_gpiod);
}
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
@@ -133,9 +133,9 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-int fixed_phy_add(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status,
- int link_gpio)
+static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
+ struct fixed_phy_status *status,
+ struct gpio_desc *gpiod)
{
int ret;
struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -156,24 +156,19 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
fp->addr = phy_addr;
fp->status = *status;
- fp->link_gpio = link_gpio;
-
- if (gpio_is_valid(fp->link_gpio)) {
- ret = gpio_request_one(fp->link_gpio, GPIOF_DIR_IN,
- "fixed-link-gpio-link");
- if (ret)
- goto err_regs;
- }
+ fp->link_gpiod = gpiod;
fixed_phy_update(fp);
list_add_tail(&fp->node, &fmb->phys);
return 0;
+}
-err_regs:
- kfree(fp);
- return ret;
+int fixed_phy_add(unsigned int irq, int phy_addr,
+		  struct fixed_phy_status *status)
+{
+ return fixed_phy_add_gpiod(irq, phy_addr, status, NULL);
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
@@ -187,8 +182,8 @@ static void fixed_phy_del(int phy_addr)
list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
if (fp->addr == phy_addr) {
list_del(&fp->node);
- if (gpio_is_valid(fp->link_gpio))
- gpio_free(fp->link_gpio);
+ if (fp->link_gpiod)
+ gpiod_put(fp->link_gpiod);
kfree(fp);
ida_simple_remove(&phy_fixed_ida, phy_addr);
return;
@@ -196,12 +191,50 @@ static void fixed_phy_del(int phy_addr)
}
}
+#ifdef CONFIG_OF_GPIO
+static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
+{
+ struct device_node *fixed_link_node;
+ struct gpio_desc *gpiod;
+
+ if (!np)
+ return NULL;
+
+ fixed_link_node = of_get_child_by_name(np, "fixed-link");
+ if (!fixed_link_node)
+ return NULL;
+
+ /*
+ * As the fixed link is just a device tree node without any
+	 * Linux device associated with it, we simply have to obtain
+ * the GPIO descriptor from the device tree like this.
+ */
+ gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0,
+ GPIOD_IN, "mdio");
+ of_node_put(fixed_link_node);
+ if (IS_ERR(gpiod)) {
+ if (PTR_ERR(gpiod) == -EPROBE_DEFER)
+ return gpiod;
+ pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
+ fixed_link_node);
+ gpiod = NULL;
+ }
+
+ return gpiod;
+}
+#else
+static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
- int link_gpio,
struct device_node *np)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct gpio_desc *gpiod = NULL;
struct phy_device *phy;
int phy_addr;
int ret;
@@ -209,12 +242,17 @@ struct phy_device *fixed_phy_register(unsigned int irq,
if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED)
return ERR_PTR(-EPROBE_DEFER);
+ /* Check if we have a GPIO associated with this fixed phy */
+ gpiod = fixed_phy_get_gpiod(np);
+ if (IS_ERR(gpiod))
+ return ERR_CAST(gpiod);
+
/* Get the next available PHY address, up to PHY_MAX_ADDR */
phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
- ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
+ ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod);
if (ret < 0) {
ida_simple_remove(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 38cfc923a88a..5531f9cabc63 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -58,24 +58,6 @@ struct mv3310_priv {
char *hwmon_name;
};
-static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
- u16 mask, u16 bits)
-{
- int old, val, ret;
-
- old = phy_read_mmd(phydev, devad, reg);
- if (old < 0)
- return old;
-
- val = (old & ~mask) | (bits & mask);
- if (val == old)
- return 0;
-
- ret = phy_write_mmd(phydev, devad, reg, val);
-
- return ret < 0 ? ret : 1;
-}
-
#ifdef CONFIG_HWMON
static umode_t mv3310_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type,
@@ -159,8 +141,8 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
return ret;
val = enable ? MV_V2_TEMP_CTRL_SAMPLE : MV_V2_TEMP_CTRL_DISABLE;
- ret = mv3310_modify(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL,
- MV_V2_TEMP_CTRL_MASK, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL,
+ MV_V2_TEMP_CTRL_MASK, val);
return ret < 0 ? ret : 0;
}
@@ -363,18 +345,18 @@ static int mv3310_config_aneg(struct phy_device *phydev)
linkmode_and(phydev->advertising, phydev->advertising,
phydev->supported);
- ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
- ADVERTISE_ALL | ADVERTISE_100BASE4 |
- ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
- linkmode_adv_to_mii_adv_t(phydev->advertising));
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_100BASE4 |
+ ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
+ linkmode_adv_to_mii_adv_t(phydev->advertising));
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
- ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
- ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg);
if (ret < 0)
return ret;
if (ret > 0)
@@ -387,8 +369,8 @@ static int mv3310_config_aneg(struct phy_device *phydev)
else
reg = 0;
- ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV10G, reg);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV10G, reg);
if (ret < 0)
return ret;
if (ret > 0)
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 03af927fa5ad..519866679c91 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -75,15 +75,9 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced);
*/
int genphy_c45_an_disable_aneg(struct phy_device *phydev)
{
- int val;
-
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
- if (val < 0)
- return val;
-
- val &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
- return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val);
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
}
EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
@@ -97,15 +91,8 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg);
*/
int genphy_c45_restart_aneg(struct phy_device *phydev)
{
- int val;
-
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
- if (val < 0)
- return val;
-
- val |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
-
- return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val);
+ return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);
}
EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 909b3344babf..7d6aad287f84 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -414,15 +414,15 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
}
/**
- * phy_read_mmd - Convenience function for reading a register
+ * __phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
* @phydev: The phy_device struct
* @devad: The MMD to read from (0..31)
* @regnum: The register on the MMD to read (0..65535)
*
- * Same rules as for phy_read();
+ * Same rules as for __phy_read();
*/
-int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
+int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
{
int val;
@@ -434,33 +434,52 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
- val = mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
+ val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr);
} else {
struct mii_bus *bus = phydev->mdio.bus;
int phy_addr = phydev->mdio.addr;
- mutex_lock(&bus->mdio_lock);
mmd_phy_indirect(bus, phy_addr, devad, regnum);
/* Read the content of the MMD's selected register */
val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
- mutex_unlock(&bus->mdio_lock);
}
return val;
}
+EXPORT_SYMBOL(__phy_read_mmd);
+
+/**
+ * phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Same rules as for phy_read();
+ */
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_read_mmd(phydev, devad, regnum);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(phy_read_mmd);
/**
- * phy_write_mmd - Convenience function for writing a register
+ * __phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
* @phydev: The phy_device struct
* @devad: The MMD to read from
* @regnum: The register on the MMD to read
* @val: value to write to @regnum
*
- * Same rules as for phy_write();
+ * Same rules as for __phy_write();
*/
-int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
+int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
{
int ret;
@@ -472,23 +491,43 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
} else if (phydev->is_c45) {
u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff);
- ret = mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
- addr, val);
+ ret = __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
+ addr, val);
} else {
struct mii_bus *bus = phydev->mdio.bus;
int phy_addr = phydev->mdio.addr;
- mutex_lock(&bus->mdio_lock);
mmd_phy_indirect(bus, phy_addr, devad, regnum);
/* Write the data into MMD's selected register */
__mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
- mutex_unlock(&bus->mdio_lock);
ret = 0;
}
return ret;
}
+EXPORT_SYMBOL(__phy_write_mmd);
+
+/**
+ * phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Same rules as for phy_write();
+ */
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_write_mmd(phydev, devad, regnum, val);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(phy_write_mmd);
/**
@@ -538,6 +577,57 @@ int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
}
EXPORT_SYMBOL_GPL(phy_modify);
+/**
+ * __phy_modify_mmd - Convenience function for modifying a register on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * Unlocked helper function which allows an MMD register to be modified as
+ * new register value = (old register value & ~mask) | set
+ */
+int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ ret = __phy_read_mmd(phydev, devad, regnum);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write_mmd(phydev, devad, regnum, (ret & ~mask) | set);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(__phy_modify_mmd);
+
+/**
+ * phy_modify_mmd - Convenience function for modifying a register on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+ ret = __phy_modify_mmd(phydev, devad, regnum, mask, set);
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify_mmd);
+
static int __phy_read_page(struct phy_device *phydev)
{
return phydev->drv->read_page(phydev);
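
The split introduced here mirrors the existing phy_read()/__phy_read() convention: phy_read_mmd(), phy_write_mmd() and phy_modify_mmd() take the bus mdio_lock themselves, while the new __-prefixed variants assume the caller already holds it, so several MMD accesses can be grouped under a single lock acquisition. A rough user-space model of that pattern, with a pthread mutex and a plain variable standing in for the MDIO bus lock and one register (illustrative only, not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t mdio_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t mmd_reg = 0x1140;	/* stand-in register content */

/* "Unlocked" worker: the caller must already hold mdio_lock. */
static int __modify_mmd(uint16_t mask, uint16_t set)
{
	mmd_reg = (mmd_reg & ~mask) | set;
	return 0;
}

/* Locked wrapper: takes the lock and delegates to the unlocked worker. */
static int modify_mmd(uint16_t mask, uint16_t set)
{
	int ret;

	pthread_mutex_lock(&mdio_lock);
	ret = __modify_mmd(mask, set);
	pthread_mutex_unlock(&mdio_lock);

	return ret;
}

int main(void)
{
	/* One-shot caller: the locked wrapper is enough. */
	modify_mmd(0x1000, 0x0000);

	/* Batching caller: take the lock once, use the __ variants. */
	pthread_mutex_lock(&mdio_lock);
	__modify_mmd(0x0040, 0x0040);
	__modify_mmd(0x0200, 0x0200);
	pthread_mutex_unlock(&mdio_lock);

	printf("register now 0x%04x\n", (unsigned int)mmd_reg);
	return 0;
}
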
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d12aa512b7f5..89ead29cec68 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1060,17 +1060,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (!phy_check_valid(phydev->speed, phydev->duplex, common))
goto eee_exit_err;
- if (clk_stop_enable) {
+ if (clk_stop_enable)
/* Configure the PHY to stop receiving xMII
* clock while it is signaling LPI.
*/
- int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (val < 0)
- return val;
-
- val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
- }
+ phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_CLKSTOP_EN);
return 0; /* EEE supported */
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index e96bc0c6140f..3d92ea6fcc02 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2051,8 +2051,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
phydev = phy_find_first(dev->mdiobus);
if (!phydev) {
netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
- NULL);
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (IS_ERR(phydev)) {
netdev_err(dev->net, "No PHY/fixed_PHY found\n");
return NULL;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2034ccc7cc72..021eb30d1fb7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -1986,7 +1986,7 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
/* no default handler to allow compiler to check that the
* enum is fully handled
*/
- };
+ }
return "<unknown>";
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 54132af70094..aa1c71a76ef7 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1140,7 +1140,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
len -= ie_len;
data += ie_len;
- };
+ }
ret = 0;
out:
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index 77046384dd80..976c8ec4e992 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -668,15 +668,13 @@ static void b43_remove_dynamic_debug(struct b43_wldev *dev)
static void b43_add_dynamic_debug(struct b43_wldev *dev)
{
struct b43_dfsentry *e = dev->dfsentry;
- struct dentry *d;
-#define add_dyn_dbg(name, id, initstate) do { \
- e->dyn_debug[id] = (initstate); \
- d = debugfs_create_bool(name, 0600, e->subdir, \
- &(e->dyn_debug[id])); \
- if (!IS_ERR(d)) \
- e->dyn_debug_dentries[id] = d; \
- } while (0)
+#define add_dyn_dbg(name, id, initstate) do { \
+ e->dyn_debug[id] = (initstate); \
+ e->dyn_debug_dentries[id] = \
+ debugfs_create_bool(name, 0600, e->subdir, \
+ &(e->dyn_debug[id])); \
+ } while (0)
add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, false);
add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, false);
@@ -718,19 +716,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy));
e->subdir = debugfs_create_dir(devdir, rootdir);
- if (!e->subdir || IS_ERR(e->subdir)) {
- if (e->subdir == ERR_PTR(-ENODEV)) {
- b43dbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not "
- "enabled in kernel config\n");
- } else {
- b43err(dev->wl, "debugfs: cannot create %s directory\n",
- devdir);
- }
- dev->dfsentry = NULL;
- kfree(log->log);
- kfree(e);
- return;
- }
e->mmio16read_next = 0xFFFF; /* invalid address */
e->mmio32read_next = 0xFFFF; /* invalid address */
@@ -741,13 +726,10 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
#define ADD_FILE(name, mode) \
do { \
- struct dentry *d; \
- d = debugfs_create_file(__stringify(name), \
+ e->file_##name.dentry = \
+ debugfs_create_file(__stringify(name), \
mode, e->subdir, dev, \
&fops_##name.fops); \
- e->file_##name.dentry = NULL; \
- if (!IS_ERR(d)) \
- e->file_##name.dentry = d; \
} while (0)
@@ -818,8 +800,6 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
void b43_debugfs_init(void)
{
rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR(rootdir))
- rootdir = NULL;
}
void b43_debugfs_exit(void)
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index 82ef56ed7ca1..8150adee3e34 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -361,15 +361,13 @@ static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev)
static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev)
{
struct b43legacy_dfsentry *e = dev->dfsentry;
- struct dentry *d;
-#define add_dyn_dbg(name, id, initstate) do { \
- e->dyn_debug[id] = (initstate); \
- d = debugfs_create_bool(name, 0600, e->subdir, \
- &(e->dyn_debug[id])); \
- if (!IS_ERR(d)) \
- e->dyn_debug_dentries[id] = d; \
- } while (0)
+#define add_dyn_dbg(name, id, initstate) do { \
+ e->dyn_debug[id] = (initstate); \
+ e->dyn_debug_dentries[id] = \
+ debugfs_create_bool(name, 0600, e->subdir, \
+ &(e->dyn_debug[id])); \
+ } while (0)
add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, false);
add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, false);
@@ -408,29 +406,14 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev)
snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy));
e->subdir = debugfs_create_dir(devdir, rootdir);
- if (!e->subdir || IS_ERR(e->subdir)) {
- if (e->subdir == ERR_PTR(-ENODEV)) {
- b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not "
- "enabled in kernel config\n");
- } else {
- b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n",
- devdir);
- }
- dev->dfsentry = NULL;
- kfree(log->log);
- kfree(e);
- return;
- }
#define ADD_FILE(name, mode) \
do { \
- struct dentry *d; \
- d = debugfs_create_file(__stringify(name), \
+ e->file_##name.dentry = \
+ debugfs_create_file(__stringify(name), \
mode, e->subdir, dev, \
&fops_##name.fops); \
e->file_##name.dentry = NULL; \
- if (!IS_ERR(d)) \
- e->file_##name.dentry = d; \
} while (0)
@@ -492,8 +475,6 @@ void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev,
void b43legacy_debugfs_init(void)
{
rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR(rootdir))
- rootdir = NULL;
}
void b43legacy_debugfs_exit(void)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index 22fd95a736a8..f7cf3e5f4849 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -16,8 +16,8 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y += \
- -Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \
- -Idrivers/net/wireless/broadcom/brcm80211/include
+ -I $(srctree)/$(src) \
+ -I $(srctree)/$(src)/../include
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index d64bf233b12c..ec129864cc9c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -315,7 +315,7 @@ static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
/* bail out as things are really fishy here */
WARN(1, "invalid sdio function number: %d\n", func->num);
err = -ENOMEDIUM;
- };
+ }
if (err == -ENOMEDIUM)
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 1f1e95a15a17..0ce1d8174e6d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -149,7 +149,7 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
return err;
}
- err = request_firmware(&clm, clm_name, bus->dev);
+ err = firmware_request_nowarn(&clm, clm_name, bus->dev);
if (err) {
brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
err);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 51d76ac45075..7535cb0d4ac0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -43,6 +43,10 @@ static const struct brcmf_dmi_data meegopad_t08_data = {
BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08"
};
+static const struct brcmf_dmi_data pov_tab_p1006w_data = {
+ BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data"
+};
+
static const struct dmi_system_id dmi_platform_data[] = {
{
/* Match for the GPDwin which unfortunately uses somewhat
@@ -81,6 +85,17 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&meegopad_t08_data,
},
+ {
+ /* Point of View TAB-P1006W-232 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+			/* Note 105b is Foxconn's USB/PCI vendor id */
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
+ },
+ .driver_data = (void *)&pov_tab_p1006w_data,
+ },
{}
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index ffa243e2e2d0..37b403877228 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -496,6 +496,11 @@ int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
pi = ifp_to_pno(ifp);
+
+ /* No PNO request */
+ if (!pi->n_reqs)
+ return 0;
+
err = brcmf_pno_remove_request(pi, reqid);
if (err)
return err;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index a4308c6e72d7..76cfaf6999c8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1550,6 +1550,10 @@ void brcmf_usb_exit(void)
void brcmf_usb_register(void)
{
+ int ret;
+
brcmf_dbg(USB, "Enter\n");
- usb_register(&brcmf_usbdrvr);
+ ret = usb_register(&brcmf_usbdrvr);
+ if (ret)
+ brcmf_err("usb_register failed %d\n", ret);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index ed83f33aceb7..482d7737764d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -16,9 +16,9 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
- -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \
- -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \
- -Idrivers/net/wireless/broadcom/brcm80211/include
+ -I $(srctree)/$(src) \
+ -I $(srctree)/$(src)/phy \
+ -I $(srctree)/$(src)/../include
brcmsmac-y := \
mac80211_if.o \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
index 3bd54f125776..6d776ef6ff54 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
@@ -37,27 +37,18 @@ static struct dentry *root_folder;
void brcms_debugfs_init(void)
{
root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR(root_folder))
- root_folder = NULL;
}
void brcms_debugfs_exit(void)
{
- if (!root_folder)
- return;
-
debugfs_remove_recursive(root_folder);
root_folder = NULL;
}
-int brcms_debugfs_attach(struct brcms_pub *drvr)
+void brcms_debugfs_attach(struct brcms_pub *drvr)
{
- if (!root_folder)
- return -ENODEV;
-
drvr->dbgfs_dir = debugfs_create_dir(
dev_name(&drvr->wlc->hw->d11core->dev), root_folder);
- return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
}
void brcms_debugfs_detach(struct brcms_pub *drvr)
@@ -195,7 +186,7 @@ static const struct file_operations brcms_debugfs_def_ops = {
.llseek = seq_lseek
};
-static int
+static void
brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn,
int (*read_fn)(struct seq_file *seq, void *data))
{
@@ -203,27 +194,18 @@ brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn,
struct dentry *dentry = drvr->dbgfs_dir;
struct brcms_debugfs_entry *entry;
- if (IS_ERR_OR_NULL(dentry))
- return -ENOENT;
-
entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
if (!entry)
- return -ENOMEM;
+ return;
entry->read = read_fn;
entry->drvr = drvr;
- dentry = debugfs_create_file(fn, 0444, dentry, entry,
- &brcms_debugfs_def_ops);
-
- return PTR_ERR_OR_ZERO(dentry);
+ debugfs_create_file(fn, 0444, dentry, entry, &brcms_debugfs_def_ops);
}
void brcms_debugfs_create_files(struct brcms_pub *drvr)
{
- if (IS_ERR_OR_NULL(drvr->dbgfs_dir))
- return;
-
brcms_debugfs_add_entry(drvr, "hardware", brcms_debugfs_hardware_read);
brcms_debugfs_add_entry(drvr, "macstat", brcms_debugfs_macstat_read);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h
index 822781cf15d4..56898e6d789d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h
@@ -68,7 +68,7 @@ void __brcms_dbg(struct device *dev, u32 level, const char *func,
struct brcms_pub;
void brcms_debugfs_init(void);
void brcms_debugfs_exit(void);
-int brcms_debugfs_attach(struct brcms_pub *drvr);
+void brcms_debugfs_attach(struct brcms_pub *drvr);
void brcms_debugfs_detach(struct brcms_pub *drvr);
struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr);
void brcms_debugfs_create_files(struct brcms_pub *drvr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index e78a93a45741..c6e107f41948 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1199,8 +1199,6 @@ wlc_lcnphy_rx_iq_est(struct brcms_phy *pi,
{
int wait_count = 0;
bool result = true;
- u8 phybw40;
- phybw40 = CHSPEC_IS40(pi->radio_chanspec);
mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5);
@@ -3082,7 +3080,7 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
u8 bbmult;
struct phytbl_info tab;
s32 a1, b0, b1;
- s32 tssi, pwr, maxtargetpwr, mintargetpwr;
+ s32 tssi, pwr, mintargetpwr;
bool suspend;
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
@@ -3119,7 +3117,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
b0 = pi->txpa_2g[0];
b1 = pi->txpa_2g[1];
a1 = pi->txpa_2g[2];
- maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
@@ -4212,7 +4209,7 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
s8 index;
struct phytbl_info tab;
s32 a1, b0, b1;
- s32 tssi, pwr, maxtargetpwr, mintargetpwr;
+ s32 tssi, pwr, mintargetpwr;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
pi->phy_lastcal = pi->sh->now;
@@ -4249,7 +4246,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
b0 = pi->txpa_2g[0];
b1 = pi->txpa_2g[1];
a1 = pi->txpa_2g[2];
- maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
@@ -4622,13 +4618,10 @@ static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
{
uint idx;
- u8 phybw40;
struct phytbl_info tab;
const struct phytbl_info *tb;
u32 val;
- phybw40 = CHSPEC_IS40(pi->radio_chanspec);
-
for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++)
wlc_lcnphy_write_table(pi, &dot11lcnphytbl_info_rev0[idx]);
@@ -4831,9 +4824,7 @@ static void wlc_lcnphy_baseband_init(struct brcms_phy *pi)
void wlc_phy_init_lcnphy(struct brcms_phy *pi)
{
- u8 phybw40;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- phybw40 = CHSPEC_IS40(pi->radio_chanspec);
pi_lcn->lcnphy_cal_counter = 0;
pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
index 256c91f9ac4b..bb02c6220a88 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
@@ -15,9 +15,7 @@
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-ccflags-y := \
- -Idrivers/net/wireless/broadcom/brcm80211/brcmutil \
- -Idrivers/net/wireless/broadcom/brcm80211/include
+ccflags-y := -I $(srctree)/$(src)/../include
obj-$(CONFIG_BRCMUTIL) += brcmutil.o
brcmutil-objs = utils.o d11.o
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 57e3b6cca234..271977f7fbb0 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3756,10 +3756,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_remove_sysfs;
- err = il_dbgfs_register(il, DRV_NAME);
- if (err)
- IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
- err);
+ il_dbgfs_register(il, DRV_NAME);
/* Start monitoring the killswitch */
queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 6b4488a178a7..94222ae464ae 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4988,10 +4988,7 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
if (err)
goto out_unbind;
- err = il_dbgfs_register(il, DRV_NAME);
- if (err)
- IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
- err);
+ il_dbgfs_register(il, DRV_NAME);
err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
if (err) {
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index dc6a74a05983..b079c64ca014 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2974,13 +2974,11 @@ il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
#endif /* CONFIG_IWLEGACY_DEBUG */
#ifdef CONFIG_IWLEGACY_DEBUGFS
-int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_register(struct il_priv *il, const char *name);
void il_dbgfs_unregister(struct il_priv *il);
#else
-static inline int
-il_dbgfs_register(struct il_priv *il, const char *name)
+static inline void il_dbgfs_register(struct il_priv *il, const char *name)
{
- return 0;
}
static inline void
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
index d76073def677..fa211412e0ac 100644
--- a/drivers/net/wireless/intel/iwlegacy/debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -128,23 +128,12 @@ EXPORT_SYMBOL(il_update_stats);
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, il, \
- &il_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, il, \
+ &il_dbgfs_##name##_ops); \
} while (0)
#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_bool(#name, 0600, parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_x32(#name, 0600, parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
+ debugfs_create_bool(#name, 0600, parent, ptr); \
} while (0)
/* file operation */
@@ -1341,27 +1330,18 @@ DEBUGFS_WRITE_FILE_OPS(wd_timeout);
* Create the debugfs files and directories
*
*/
-int
+void
il_dbgfs_register(struct il_priv *il, const char *name)
{
struct dentry *phyd = il->hw->wiphy->debugfsdir;
struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
dir_drv = debugfs_create_dir(name, phyd);
- if (!dir_drv)
- return -ENOMEM;
-
il->debugfs_dir = dir_drv;
dir_data = debugfs_create_dir("data", dir_drv);
- if (!dir_data)
- goto err;
dir_rf = debugfs_create_dir("rf", dir_drv);
- if (!dir_rf)
- goto err;
dir_debug = debugfs_create_dir("debug", dir_drv);
- if (!dir_debug)
- goto err;
DEBUGFS_ADD_FILE(nvm, dir_data, 0400);
DEBUGFS_ADD_FILE(sram, dir_data, 0600);
@@ -1399,12 +1379,6 @@ il_dbgfs_register(struct il_priv *il, const char *name)
DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&il->disable_chain_noise_cal);
DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
- return 0;
-
-err:
- IL_ERR("Can't create the debugfs directory\n");
- il_dbgfs_unregister(il);
- return -ENOMEM;
}
EXPORT_SYMBOL(il_dbgfs_register);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 7e65073834b7..eb93711d474b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -83,6 +83,7 @@
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -104,6 +105,8 @@
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_CC_A_MODULE_FIRMWARE(api) \
+ IWL_CC_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -195,8 +198,8 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
IWL_DEVICE_22500,
};
-const struct iwl_cfg iwl22000_2ax_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
+const struct iwl_cfg iwl22560_2ax_cfg_hr = {
+ .name = "Intel(R) Wireless-AX 22560",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -207,6 +210,42 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
+const struct iwl_cfg iwl22260_2ax_cfg = {
+ .name = "Intel(R) Wireless-AX 22260",
+ .fw_name_pre = IWL_CC_A_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg killer1650x_2ax_cfg = {
+ .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (22260NGW)",
+ .fw_name_pre = IWL_CC_A_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg killer1650w_2ax_cfg = {
+ .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (22260D2W)",
+ .fw_name_pre = IWL_CC_A_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
/*
* All JF radio modules are part of the 9000 series, but the MAC part
* looks more like 22000. That's why this device is here, but called
@@ -230,6 +269,12 @@ const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
IWL_DEVICE_22500,
};
+const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
@@ -242,6 +287,30 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
IWL_DEVICE_22500,
};
+const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
+ .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
+ .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
const struct iwl_cfg iwl22000_2ax_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
@@ -324,3 +393,4 @@ MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index f2114137c13f..113bcf7735a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -73,21 +73,12 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
-#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
-#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
-#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
-#define IWL9000A_MODULE_FIRMWARE(api) \
- IWL9000A_FW_PRE __stringify(api) ".ucode"
-#define IWL9000B_MODULE_FIRMWARE(api) \
- IWL9000B_FW_PRE __stringify(api) ".ucode"
-#define IWL9000RFB_MODULE_FIRMWARE(api) \
- IWL9000RFB_FW_PRE __stringify(api) ".ucode"
-#define IWL9260A_MODULE_FIRMWARE(api) \
- IWL9260A_FW_PRE __stringify(api) ".ucode"
-#define IWL9260B_MODULE_FIRMWARE(api) \
- IWL9260B_FW_PRE __stringify(api) ".ucode"
+#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
+#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
+#define IWL9000_MODULE_FIRMWARE(api) \
+ IWL9000_FW_PRE __stringify(api) ".ucode"
+#define IWL9260_MODULE_FIRMWARE(api) \
+ IWL9260_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -162,81 +153,87 @@ static const struct iwl_tt_params iwl9000_tt_params = {
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+};
+
+const struct iwl_cfg iwl9260_2ac_160_cfg = {
+ .name = "Intel(R) Wireless-AC 9260 160MHz",
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9260_killer_2ac_cfg = {
.name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9460_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9460_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
};
const struct iwl_cfg iwl9461_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9462_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9260A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9560_2ac_cfg_soc = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg_soc = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -244,9 +241,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -254,9 +249,7 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -264,9 +257,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -275,9 +266,7 @@ const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -286,9 +275,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -297,9 +284,16 @@ const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .integrated = true,
+ .soc_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -308,9 +302,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
@@ -319,17 +311,12 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000A_FW_PRE,
- .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
- .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ .fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
-MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 702d42b2d452..0486b17d7c41 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -11,4 +11,4 @@ iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
-ccflags-y += -I$(src)/../
+ccflags-y += -I $(srctree)/$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 49b71dbf8490..54b759cec8b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -710,24 +711,6 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
-{
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
- return false;
- return true;
-}
-
-static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
-{
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
- return false;
- if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
- return true;
-
- /* disabled by default */
- return false;
-}
-
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
@@ -752,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (!iwl_enable_rx_ampdu(priv->cfg))
+ if (!iwl_enable_rx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Rx\n");
ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -764,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
if (!priv->trans->ops->txq_enable)
break;
- if (!iwl_enable_tx_ampdu(priv->cfg))
+ if (!iwl_enable_tx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index c219bca5cff4..bd3c3b921d4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1054,7 +1054,7 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
else
IWL_ERR(priv,
- "Cannot request restart before registrating with mac80211\n");
+ "Cannot request restart before registering with mac80211\n");
} else {
WARN_ON(1);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 4de2727ac63e..a156dcf5b7d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -325,9 +326,9 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
iwl_prepare_ct_kill_task(priv);
tt->state = old_state;
}
- } else if (old_state == IWL_TI_CT_KILL &&
- tt->state != IWL_TI_CT_KILL)
+ } else if (old_state == IWL_TI_CT_KILL) {
iwl_perform_ct_kill_task(priv, false);
+ }
IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n",
tt->state);
IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 8b4922bbe139..0290b333d860 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -77,7 +77,8 @@
* @DATA_PATH_GROUP: data path group, uses command IDs from
* &enum iwl_data_path_subcmd_ids
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
- * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids
+ * @LOCATION_GROUP: location group, uses command IDs from
+ * &enum iwl_location_subcmd_ids
* @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
* &enum iwl_prot_offload_subcmd_ids
* @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
@@ -92,7 +93,7 @@ enum iwl_mvm_command_groups {
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
NAN_GROUP = 0x7,
- TOF_GROUP = 0x8,
+ LOCATION_GROUP = 0x8,
PROT_OFFLOAD_GROUP = 0xb,
REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
@@ -353,16 +354,6 @@ enum iwl_legacy_cmds {
PHY_DB_CMD = 0x6c,
/**
- * @TOF_CMD: &struct iwl_tof_config_cmd
- */
- TOF_CMD = 0x10,
-
- /**
- * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd
- */
- TOF_NOTIFICATION = 0x11,
-
- /**
* @POWER_TABLE_CMD: &struct iwl_device_power_cmd
*/
POWER_TABLE_CMD = 0x77,
@@ -415,7 +406,11 @@ enum iwl_legacy_cmds {
TX_ANT_CONFIGURATION_CMD = 0x98,
/**
- * @STATISTICS_CMD: &struct iwl_statistics_cmd
+ * @STATISTICS_CMD:
+ * one of &struct iwl_statistics_cmd,
+ * &struct iwl_notif_statistics_v11,
+ * &struct iwl_notif_statistics_v10,
+ * &struct iwl_notif_statistics
*/
STATISTICS_CMD = 0x9c,
@@ -423,7 +418,7 @@ enum iwl_legacy_cmds {
* @STATISTICS_NOTIFICATION:
* one of &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics_v11,
- * &struct iwl_notif_statistics_cdb
+ * &struct iwl_notif_statistics
*/
STATISTICS_NOTIFICATION = 0x9d,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 6fae02fa4cad..86ea0784e1a3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -224,8 +224,18 @@ struct iwl_wowlan_pattern {
#define IWL_WOWLAN_MAX_PATTERNS 20
+/**
+ * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
+ */
struct iwl_wowlan_patterns_cmd {
+ /**
+ * @n_patterns: number of patterns
+ */
__le32 n_patterns;
+
+ /**
+ * @patterns: the patterns, array length in @n_patterns
+ */
struct iwl_wowlan_pattern patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
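
@patterns is a flexible array whose length is carried in @n_patterns, so the host has to size the command buffer as the fixed header plus n_patterns copies of the per-pattern structure. A minimal sketch of that sizing in plain C; struct pattern below is a stand-in with made-up fields, not the real struct iwl_wowlan_pattern layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in only: the real pattern structure has its own layout. */
struct pattern {
	uint8_t mask[16];
	uint8_t data[128];
};

/* Mirrors the shape of the command: a count followed by a flexible array. */
struct patterns_cmd {
	uint32_t n_patterns;
	struct pattern patterns[];
};

int main(void)
{
	unsigned int n = 3;
	size_t len = sizeof(struct patterns_cmd) + n * sizeof(struct pattern);

	printf("command length for %u patterns: %zu bytes\n", n, len);
	return 0;
}
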
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index fdc54a5dc9de..93c06e6c1ced 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -105,6 +105,12 @@ enum iwl_data_path_subcmd_ids {
HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
/**
+ * @CHEST_COLLECTOR_FILTER_CONFIG_CMD: Configure the CSI
+ * matrix collection, uses &struct iwl_channel_estimation_cfg
+ */
+ CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
+
+ /**
* @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
*/
RX_NO_DATA_NOTIF = 0xF5,
@@ -156,4 +162,53 @@ struct iwl_mu_group_mgmt_notif {
__le32 user_position[4];
} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
+enum iwl_channel_estimation_flags {
+ IWL_CHANNEL_ESTIMATION_ENABLE = BIT(0),
+ IWL_CHANNEL_ESTIMATION_TIMER = BIT(1),
+ IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2),
+};
+
+/**
+ * struct iwl_channel_estimation_cfg - channel estimation reporting config
+ */
+struct iwl_channel_estimation_cfg {
+ /**
+ * @flags: flags, see &enum iwl_channel_estimation_flags
+ */
+ __le32 flags;
+ /**
+ * @timer: if enabled via flags, automatically disable after this many
+ * microseconds
+ */
+ __le32 timer;
+ /**
+ * @count: if enabled via flags, automatically disable after this many
+ * frames with channel estimation matrix were captured
+ */
+ __le32 count;
+ /**
+ * @rate_n_flags_mask: only try to record the channel estimation matrix
+ * if the rate_n_flags value for the received frame (let's call
+ * that rx_rnf) matches the mask/value given here like this:
+ * (rx_rnf & rate_n_flags_mask) == rate_n_flags_val.
+ */
+ __le32 rate_n_flags_mask;
+ /**
+ * @rate_n_flags_val: see @rate_n_flags_mask
+ */
+ __le32 rate_n_flags_val;
+ /**
+ * @reserved: reserved (for alignment)
+ */
+ __le32 reserved;
+ /**
+	 * @frame_types: bitmap of frame types to capture; the received frame's
+ * subtype|type takes 6 bits in the frame and the corresponding bit
+ * in this field must be set to 1 to capture channel estimation for
+ * that frame type. Set to all-ones to enable capturing for all
+ * frame types.
+ */
+ __le64 frame_types;
+} __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */
+
#endif /* __iwl_fw_api_datapath_h__ */
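
The kernel-doc above spells out the capture filter: a received frame is recorded only when (rx_rnf & rate_n_flags_mask) == rate_n_flags_val, with @timer and @count available to auto-disable collection. A minimal user-space sketch of just that predicate (the struct itself is not reproduced; byte-order handling for the __le32 fields is omitted and the mask/value numbers are invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Same rule as documented for rate_n_flags_mask/rate_n_flags_val. */
static bool chest_rate_matches(uint32_t rx_rnf, uint32_t mask, uint32_t val)
{
	return (rx_rnf & mask) == val;
}

int main(void)
{
	uint32_t mask = 0x00000f00;	/* pretend: one rate field */
	uint32_t val  = 0x00000400;	/* pretend: capture only this rate */
	uint32_t frames[] = { 0x00000410, 0x00000210 };

	for (unsigned int i = 0; i < 2; i++)
		printf("rate_n_flags 0x%08x -> %s\n", (unsigned int)frames[i],
		       chest_rate_matches(frames[i], mask, val) ?
		       "record CSI" : "skip");
	return 0;
}
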
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index dc1fa377087a..988584973aba 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -335,29 +335,11 @@ struct iwl_dbg_mem_access_rsp {
__le32 data[];
} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
-#define CONT_REC_COMMAND_SIZE 80
-#define ENABLE_CONT_RECORDING 0x15
-#define DISABLE_CONT_RECORDING 0x16
+#define LDBG_CFG_COMMAND_SIZE 80
#define BUFFER_ALLOCATION 0x27
#define START_DEBUG_RECORDING 0x29
#define STOP_DEBUG_RECORDING 0x2A
-/*
- * struct iwl_continuous_record_mode - recording mode
- */
-struct iwl_continuous_record_mode {
- __le16 enable_recording;
-} __packed;
-
-/*
- * struct iwl_continuous_record_cmd - enable/disable continuous recording
- */
-struct iwl_continuous_record_cmd {
- struct iwl_continuous_record_mode record_mode;
- u8 pad[CONT_REC_COMMAND_SIZE -
- sizeof(struct iwl_continuous_record_mode)];
-} __packed;
-
/* maximum fragments to be allocated per target of allocationId */
#define IWL_BUFFER_LOCATION_MAX_FRAGS 2
@@ -385,4 +367,17 @@ struct iwl_buffer_allocation_cmd {
struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS];
} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */
+/**
+ * struct iwl_ldbg_config_cmd - LDBG config command
+ * @type: configuration type
+ * @pad: reserved space for type-dependent data
+ */
+struct iwl_ldbg_config_cmd {
+ __le32 type;
+ union {
+ u8 pad[LDBG_CFG_COMMAND_SIZE - sizeof(__le32)];
+ struct iwl_buffer_allocation_cmd buffer_allocation;
+ }; /* LDBG_CFG_BODY_API_U_VER_2 (partially) */
+} __packed; /* LDBG_CFG_CMD_API_S_VER_2 */
+
#endif /* __iwl_fw_api_debug_h__ */
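
Note: the union in iwl_ldbg_config_cmd pads every variant to the same length, so the command keeps the fixed 80-byte size of the old continuous-recording command it replaces. A minimal standalone sketch of that invariant, using a host-order mirror of the struct (the mirror and the static_assert are illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define LDBG_CFG_COMMAND_SIZE_EX 80	/* local stand-in for LDBG_CFG_COMMAND_SIZE */

/* Host-order mirror of iwl_ldbg_config_cmd, for illustration only. */
struct ldbg_config_cmd_example {
	uint32_t type;
	union {
		uint8_t pad[LDBG_CFG_COMMAND_SIZE_EX - sizeof(uint32_t)];
		/* ... type-specific bodies such as the buffer allocation cmd ... */
	} body;
};

/* Whichever member is used, the on-the-wire size stays fixed. */
static_assert(sizeof(struct ldbg_config_cmd_example) == LDBG_CFG_COMMAND_SIZE_EX,
	      "LDBG config command must stay 80 bytes");
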
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
new file mode 100644
index 000000000000..6da91ec0df55
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -0,0 +1,711 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_location_h__
+#define __iwl_fw_api_location_h__
+
+/**
+ * enum iwl_location_subcmd_ids - location group command IDs
+ */
+enum iwl_location_subcmd_ids {
+ /**
+ * @TOF_RANGE_REQ_CMD: TOF ranging request,
+ * uses &struct iwl_tof_range_req_cmd
+ */
+ TOF_RANGE_REQ_CMD = 0x0,
+ /**
+ * @TOF_CONFIG_CMD: TOF configuration, uses &struct iwl_tof_config_cmd
+ */
+ TOF_CONFIG_CMD = 0x1,
+ /**
+ * @TOF_RANGE_ABORT_CMD: abort ongoing ranging, uses
+ * &struct iwl_tof_range_abort_cmd
+ */
+ TOF_RANGE_ABORT_CMD = 0x2,
+ /**
+ * @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config,
+ * uses &struct iwl_tof_range_req_ext_cmd
+ */
+ TOF_RANGE_REQ_EXT_CMD = 0x3,
+ /**
+ * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration,
+ * uses &struct iwl_tof_responder_config_cmd
+ */
+ TOF_RESPONDER_CONFIG_CMD = 0x4,
+ /**
+ * @TOF_RESPONDER_DYN_CONFIG_CMD: FTM dynamic configuration,
+ * uses &struct iwl_tof_responder_dyn_config_cmd
+ */
+ TOF_RESPONDER_DYN_CONFIG_CMD = 0x5,
+ /**
+ * @CSI_HEADER_NOTIFICATION: CSI header
+ */
+ CSI_HEADER_NOTIFICATION = 0xFA,
+ /**
+ * @CSI_CHUNKS_NOTIFICATION: CSI chunk,
+ * uses &struct iwl_csi_chunk_notification
+ */
+ CSI_CHUNKS_NOTIFICATION = 0xFB,
+ /**
+ * @TOF_LC_NOTIF: used for LCI/civic location, contains just
+ * the action frame
+ */
+ TOF_LC_NOTIF = 0xFC,
+ /**
+ * @TOF_RESPONDER_STATS: FTM responder statistics notification,
+ * uses &struct iwl_ftm_responder_stats
+ */
+ TOF_RESPONDER_STATS = 0xFD,
+ /**
+ * @TOF_MCSI_DEBUG_NOTIF: MCSI debug notification, uses
+ * &struct iwl_tof_mcsi_notif
+ */
+ TOF_MCSI_DEBUG_NOTIF = 0xFE,
+ /**
+ * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using
+ * &struct iwl_tof_range_rsp_ntfy
+ */
+ TOF_RANGE_RESPONSE_NOTIF = 0xFF,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: indicates if ToF is disabled (or not)
+ * @one_sided_disabled: indicates if one-sided is disabled (or not)
+ * @is_debug_mode: indicates if debug mode is active
+ * @is_buf_required: indicates if channel estimation buffer is required
+ */
+struct iwl_tof_config_cmd {
+ u8 tof_disabled;
+ u8 one_sided_disabled;
+ u8 is_debug_mode;
+ u8 is_buf_required;
+} __packed;
+
+/**
+ * enum iwl_tof_bandwidth - values for iwl_tof_range_req_ap_entry.bandwidth
+ * @IWL_TOF_BW_20_LEGACY: 20 MHz non-HT
+ * @IWL_TOF_BW_20_HT: 20 MHz HT
+ * @IWL_TOF_BW_40: 40 MHz
+ * @IWL_TOF_BW_80: 80 MHz
+ * @IWL_TOF_BW_160: 160 MHz
+ */
+enum iwl_tof_bandwidth {
+ IWL_TOF_BW_20_LEGACY,
+ IWL_TOF_BW_20_HT,
+ IWL_TOF_BW_40,
+ IWL_TOF_BW_80,
+ IWL_TOF_BW_160,
+}; /* LOCAT_BW_TYPE_E */
+
+/*
+ * enum iwl_tof_algo_type - Algorithm type for range measurement request
+ */
+enum iwl_tof_algo_type {
+ IWL_TOF_ALGO_TYPE_MAX_LIKE = 0,
+ IWL_TOF_ALGO_TYPE_LINEAR_REG = 1,
+ IWL_TOF_ALGO_TYPE_FFT = 2,
+
+ /* Keep last */
+ IWL_TOF_ALGO_TYPE_INVALID,
+}; /* ALGO_TYPE_E */
+
+/*
+ * enum iwl_tof_mcsi_enable - Enable/Disable MCSI notifications
+ */
+enum iwl_tof_mcsi_enable {
+ IWL_TOF_MCSI_DISABLED = 0,
+ IWL_TOF_MCSI_ENABLED = 1,
+}; /* MCSI_ENABLE_E */
+
+/**
+ * enum iwl_tof_responder_cmd_valid_field - valid fields in the responder cfg
+ * @IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO: channel info is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET: ToA offset is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB: common calibration mode is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB: specific calibration mode is
+ * valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_BSSID: BSSID is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_TX_ANT: TX antenna is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE: algorithm type is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT: non-ASAP support is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT: statistics report
+ * support is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT: MCSI notification support
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT: fast algorithm support
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure
+ * is valid
+ * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid
+ */
+enum iwl_tof_responder_cmd_valid_field {
+ IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0),
+ IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = BIT(1),
+ IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = BIT(2),
+ IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = BIT(3),
+ IWL_TOF_RESPONDER_CMD_VALID_BSSID = BIT(4),
+ IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = BIT(5),
+ IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = BIT(6),
+ IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = BIT(7),
+ IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = BIT(8),
+ IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = BIT(9),
+ IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10),
+ IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11),
+ IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12),
+};
+
+/**
+ * enum iwl_tof_responder_cfg_flags - responder configuration flags
+ * @IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT: non-ASAP support
+ * @IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS: report statistics
+ * @IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI: report MCSI
+ * @IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE: algorithm type
+ * @IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE: ToA offset mode
+ * @IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE: common calibration mode
+ * @IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE: specific calibration mode
+ * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support
+ * @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail
+ * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask
+ */
+enum iwl_tof_responder_cfg_flags {
+ IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0),
+ IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS = BIT(1),
+ IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI = BIT(2),
+ IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE = BIT(3) | BIT(4) | BIT(5),
+ IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE = BIT(6),
+ IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE = BIT(7),
+ IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8),
+ IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9),
+ IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10),
+ IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK,
+};
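+
+Note: unlike the other entries, IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE is a three-bit field (bits 3..5) rather than a single flag. A hedged sketch of packing an &enum iwl_tof_algo_type value into it follows; the helper name and the explicit shift are assumptions for illustration, not driver API.

#include <stdint.h>

#define RESPONDER_FLAGS_ALGO_TYPE_MASK	(0x7u << 3)	/* BIT(3) | BIT(4) | BIT(5) */

/* Hypothetical helper: merge an algorithm type into the responder cfg flags. */
static uint32_t responder_flags_set_algo(uint32_t flags, uint32_t algo_type)
{
	flags &= ~RESPONDER_FLAGS_ALGO_TYPE_MASK;
	flags |= (algo_type << 3) & RESPONDER_FLAGS_ALGO_TYPE_MASK;
	return flags;
}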
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth
+ * @rate: current AP rate
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @reserved1: reserved
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @reserved2: reserved
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 bandwidth;
+ u8 rate;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 reserved1;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ __le16 reserved2;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */
+
+#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
+
+/**
+ * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
+ * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer
+ * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer
+ * @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if
+ * needed, 0-padding such that the next part is dword-aligned, then CIVIC
+ * data (if exists) follows, and then 0-padding again to complete a
+ * 4-multiple long buffer.
+ */
+struct iwl_tof_responder_dyn_config_cmd {
+ __le32 lci_len;
+ __le32 civic_len;
+ u8 lci_civic[];
+} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */
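+
+Note: per the @lci_civic description above, the variable part of the dynamic config is LCI data, padding to a dword boundary, CIVIC data, and padding again, so each part is rounded up to a multiple of four when sizing the command. A minimal sketch of that computation (the helper is illustrative, not part of the driver):

#include <stddef.h>
#include <stdint.h>

#define DWORD_ALIGN(len)	(((len) + 3u) & ~3u)

/* Illustrative: bytes occupied by the lci_civic[] payload. */
static size_t tof_dyn_config_payload_len(uint32_t lci_len, uint32_t civic_len)
{
	return DWORD_ALIGN(lci_len) + DWORD_ALIGN(civic_len);
}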
+
+/**
+ * struct iwl_tof_range_req_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @reserved: reserved
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ * in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ * value to be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ * value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ * value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+ __le16 tsf_timer_offset_msec;
+ __le16 reserved;
+ u8 min_delta_ftm;
+ u8 ftm_format_and_bw20M;
+ u8 ftm_format_and_bw40M;
+ u8 ftm_format_and_bw80M;
+} __packed;
+
+/**
+ * enum iwl_tof_location_query - values for query bitmap
+ * @IWL_TOF_LOC_LCI: query LCI
+ * @IWL_TOF_LOC_CIVIC: query civic
+ */
+enum iwl_tof_location_query {
+ IWL_TOF_LOC_LCI = 0x01,
+ IWL_TOF_LOC_CIVIC = 0x02,
+};
+
+ /**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth. One of iwl_tof_bandwidth.
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @bssid: AP's BSSID
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
+ * number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity in units of 100ms; ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31);
+ * 1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ * in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ * The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR;
+ * Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ * 0: Initiator interact with regular AP;
+ * 1: Initiator interact with Responder machine: need to send the
+ * Initiator Acks with HT 40MHz / 80MHz, since the Responder should
+ * use it for its ch est measurement (this flag will be set when we
+ * configure the opposite machine to be Responder).
+ * @rssi: Last received value
+ * legal values: -128..0; values above 0 (e.g. 0x7f) indicate an invalid value.
+ * @algo_type: &enum iwl_tof_algo_type
+ * @notify_mcsi: &enum iwl_tof_mcsi_enable.
+ * @reserved: For alignment and future use
+ */
+struct iwl_tof_range_req_ap_entry {
+ u8 channel_num;
+ u8 bandwidth;
+ u8 tsf_delta_direction;
+ u8 ctrl_ch_position;
+ u8 bssid[ETH_ALEN];
+ u8 measure_type;
+ u8 num_of_bursts;
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 retries_per_sample;
+ __le32 tsf_delta;
+ u8 location_req;
+ u8 asap_mode;
+ u8 enable_dyn_ack;
+ s8 rssi;
+ u8 algo_type;
+ u8 notify_mcsi;
+ __le16 reserved;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
+ * possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon
+ * timeout expiration
+ * @IWL_MVM_TOF_RESPONSE_COMPLETE: report all AP measurements as a batch at the
+ * earlier of: measurements completion / timeout
+ * expiration.
+ */
+enum iwl_tof_response_mode {
+ IWL_MVM_TOF_RESPONSE_ASAP,
+ IWL_MVM_TOF_RESPONSE_TIMEOUT,
+ IWL_MVM_TOF_RESPONSE_COMPLETE,
+};
+
+/**
+ * enum iwl_tof_initiator_flags
+ *
+ * @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run
+ * the algo on ant A+B, instead of only one of them.
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A: open RX antenna A for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B: open RX antenna B for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C: open RX antenna C for FTMs RX
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A: use antenna A for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B: use antenna B for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C: use antenna C for TX ACKs during FTM
+ * @IWL_TOF_INITIATOR_FLAGS_MINDELTA_NO_PREF: no preference for minDeltaFTM
+ */
+enum iwl_tof_initiator_flags {
+ IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = BIT(0),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = BIT(1),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = BIT(2),
+ IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = BIT(3),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = BIT(4),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = BIT(5),
+ IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = BIT(6),
+ IWL_TOF_INITIATOR_FLAGS_MINDELTA_NO_PREF = BIT(7),
+}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
+
+#define IWL_MVM_TOF_MAX_APS 5
+#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @initiator_flags: see &enum iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @initiator: 0- NW initiated, 1 - Client Initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ * '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ * This is equivalent to the session time configured to the
+ * LMAC in Initiator Request
+ * @report_policy: Supported partially for this release: For current release -
+ * the range report will be uploaded as a batch when ready or
+ * when the session is done (successfully / partially).
+ * one of iwl_tof_response_mode.
+ * @reserved0: reserved
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ * '1' Use MAC Address randomization according to the below
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @ftm_rx_chains: Rx chain to open to receive Responder's FTMs (XVT)
+ * @ftm_tx_chains: Tx chain to send the ack to the Responder FTM (XVT)
+ * @common_calib: The common calib value to inject to this measurement calc
+ * @specific_calib: The specific calib value to inject to this measurement calc
+ * @ap: per-AP request data
+ */
+struct iwl_tof_range_req_cmd {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 initiator;
+ u8 one_sided_los_disable;
+ u8 req_timeout;
+ u8 report_policy;
+ u8 reserved0;
+ u8 num_of_ap;
+ u8 macaddr_random;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 ftm_rx_chains;
+ u8 ftm_tx_chains;
+ __le16 common_calib;
+ __le16 specific_calib;
+ struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
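+
+Note: the @macaddr_template/@macaddr_mask pair describes the source MAC randomization: bits where the mask is 0 are copied from the template, bits where the mask is 1 are randomized by the UMAC. The firmware-side behaviour is equivalent to the host-side sketch below (the function name and the rand() source are illustrative assumptions):

#include <stdint.h>
#include <stdlib.h>

#define ETH_ALEN_EX 6	/* local stand-in for ETH_ALEN */

/* Illustrative: combine template and random bits according to the mask. */
static void ftm_randomize_addr(uint8_t out[ETH_ALEN_EX],
			       const uint8_t tmpl[ETH_ALEN_EX],
			       const uint8_t mask[ETH_ALEN_EX])
{
	for (int i = 0; i < ETH_ALEN_EX; i++) {
		uint8_t rnd = (uint8_t)rand();

		/* mask bit 0 -> keep template bit, mask bit 1 -> take random bit */
		out[i] = (tmpl[i] & ~mask[i]) | (rnd & mask[i]);
	}
}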
+
+/*
+ * enum iwl_tof_range_request_status - status of the sent request
+ * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS - FW successfully received the
+ * request
+ * @IWL_TOF_RANGE_REQUEST_STATUS_BUSY - FW is busy with a previous request, the
+ * sent request will not be handled
+ */
+enum iwl_tof_range_request_status {
+ IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS,
+ IWL_TOF_RANGE_REQUEST_STATUS_BUSY,
+};
+
+/**
+ * enum iwl_tof_entry_status
+ *
+ * @IWL_TOF_ENTRY_SUCCESS: successful measurement.
+ * @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure.
+ * @IWL_TOF_ENTRY_NO_RESPONSE: Responder didn't reply to the request.
+ * @IWL_TOF_ENTRY_REQUEST_REJECTED: Responder rejected the request.
+ * @IWL_TOF_ENTRY_NOT_SCHEDULED: Time event was scheduled but not called yet.
+ * @IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: Time event triggered but no
+ * measurement was completed.
+ * @IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE: No range due to inability to switch
+ * from the primary channel.
+ * @IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED: Device doesn't support FTM.
+ * @IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON: Request aborted due to unknown
+ * reason.
+ * @IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP: Failure due to invalid
+ * T1/T4.
+ * @IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE: Failure due to invalid FTM frame
+ * structure.
+ * @IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED: Request cannot be scheduled.
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE: Responder cannot serve the
+ * initiator for some period, period supplied in @refusal_period.
+ * @IWL_TOF_ENTRY_BAD_REQUEST_ARGS: Bad request arguments.
+ * @IWL_TOF_ENTRY_WIFI_NOT_ENABLED: Wifi not enabled.
+ * @IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS: Responder override the original
+ * parameters within the current session.
+ */
+enum iwl_tof_entry_status {
+ IWL_TOF_ENTRY_SUCCESS = 0,
+ IWL_TOF_ENTRY_GENERAL_FAILURE = 1,
+ IWL_TOF_ENTRY_NO_RESPONSE = 2,
+ IWL_TOF_ENTRY_REQUEST_REJECTED = 3,
+ IWL_TOF_ENTRY_NOT_SCHEDULED = 4,
+ IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5,
+ IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6,
+ IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7,
+ IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8,
+ IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9,
+ IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10,
+ IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11,
+ IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12,
+ IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13,
+ IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14,
+ IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15,
+}; /* LOCATION_RANGE_RSP_AP_ENTRY_NTFY_API_S_VER_2 */
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @reserved: reserved
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Calibration value that was used for this AP measurement
+ * @specific_calib: Calibration value that was used for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 reserved;
+ u8 refusal_period;
+ __le32 range;
+ __le32 range_variance;
+ __le32 timestamp;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */
+
+/**
+ * enum iwl_tof_response_status - tof response status
+ *
+ * @IWL_TOF_RESPONSE_SUCCESS: successful range.
+ * @IWL_TOF_RESPONSE_TIMEOUT: request aborted due to timeout expiration.
+ * partial result of ranges done so far is included in the response.
+ * @IWL_TOF_RESPONSE_ABORTED: Measurement aborted by command.
+ * @IWL_TOF_RESPONSE_FAILED: Measurement request command failed.
+ */
+enum iwl_tof_response_status {
+ IWL_TOF_RESPONSE_SUCCESS = 0,
+ IWL_TOF_RESPONSE_TIMEOUT = 1,
+ IWL_TOF_RESPONSE_ABORTED = 4,
+ IWL_TOF_RESPONSE_FAILED = 5,
+}; /* LOCATION_RNG_RSP_STATUS */
+
+/**
+ * struct iwl_tof_range_rsp_ntfy - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session, one of
+ * &enum iwl_tof_response_status.
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy {
+ u8 request_id;
+ u8 request_status;
+ u8 last_in_batch;
+ u8 num_of_aps;
+ struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @reserved: reserved
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+ u8 token;
+ u8 role;
+ __le16 reserved;
+ u8 initiator_bssid[ETH_ALEN];
+ u8 responder_bssid[ETH_ALEN];
+ u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ * @reserved: reserved
+ */
+struct iwl_tof_range_abort_cmd {
+ u8 request_id;
+ u8 reserved[3];
+} __packed;
+
+enum ftm_responder_stats_flags {
+ FTM_RESP_STAT_NON_ASAP_STARTED = BIT(0),
+ FTM_RESP_STAT_NON_ASAP_IN_WIN = BIT(1),
+ FTM_RESP_STAT_NON_ASAP_OUT_WIN = BIT(2),
+ FTM_RESP_STAT_TRIGGER_DUP = BIT(3),
+ FTM_RESP_STAT_DUP = BIT(4),
+ FTM_RESP_STAT_DUP_IN_WIN = BIT(5),
+ FTM_RESP_STAT_DUP_OUT_WIN = BIT(6),
+ FTM_RESP_STAT_SCHED_SUCCESS = BIT(7),
+ FTM_RESP_STAT_ASAP_REQ = BIT(8),
+ FTM_RESP_STAT_NON_ASAP_REQ = BIT(9),
+ FTM_RESP_STAT_ASAP_RESP = BIT(10),
+ FTM_RESP_STAT_NON_ASAP_RESP = BIT(11),
+ FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = BIT(12),
+ FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = BIT(13),
+ FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = BIT(14),
+ FTM_RESP_STAT_FAIL_NEXT_SERVED = BIT(15),
+ FTM_RESP_STAT_FAIL_TRIGGER_ERR = BIT(16),
+ FTM_RESP_STAT_FAIL_GC = BIT(17),
+ FTM_RESP_STAT_SUCCESS = BIT(18),
+ FTM_RESP_STAT_INTEL_IE = BIT(19),
+ FTM_RESP_STAT_INITIATOR_ACTIVE = BIT(20),
+ FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = BIT(21),
+ FTM_RESP_STAT_TRIGGER_UNKNOWN = BIT(22),
+ FTM_RESP_STAT_PROCESS_FAIL = BIT(23),
+ FTM_RESP_STAT_ACK = BIT(24),
+ FTM_RESP_STAT_NACK = BIT(25),
+ FTM_RESP_STAT_INVALID_INITIATOR_ID = BIT(26),
+ FTM_RESP_STAT_TIMER_MIN_DELTA = BIT(27),
+ FTM_RESP_STAT_INITIATOR_REMOVED = BIT(28),
+ FTM_RESP_STAT_INITIATOR_ADDED = BIT(29),
+ FTM_RESP_STAT_ERR_LIST_FULL = BIT(30),
+ FTM_RESP_STAT_INITIATOR_SCHED_NOW = BIT(31),
+}; /* RESP_IND_E */
+
+/**
+ * struct iwl_ftm_responder_stats - FTM responder statistics
+ * @addr: initiator address
+ * @success_ftm: number of successful ftm frames
+ * @ftm_per_burst: num of FTM frames that were received
+ * @flags: &enum ftm_responder_stats_flags
+ * @duration: actual duration of FTM
+ * @allocated_duration: time that was allocated for this FTM session
+ * @bw: FTM request bandwidth
+ * @rate: FTM request rate
+ * @reserved: for alignment and future use
+ */
+struct iwl_ftm_responder_stats {
+ u8 addr[ETH_ALEN];
+ u8 success_ftm;
+ u8 ftm_per_burst;
+ __le32 flags;
+ __le32 duration;
+ __le32 allocated_duration;
+ u8 bw;
+ u8 rate;
+ __le16 reserved;
+} __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */
+
+#define IWL_CSI_CHUNK_CTL_NUM_MASK 0x3
+#define IWL_CSI_CHUNK_CTL_IDX_MASK 0xc
+
+struct iwl_csi_chunk_notification {
+ __le32 token;
+ __le16 seq;
+ __le16 ctl;
+ __le32 size;
+ u8 data[];
+} __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_location_h__ */
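
Note: the low nibble of the chunk notification's ctl word carries the two small fields masked by IWL_CSI_CHUNK_CTL_NUM_MASK and IWL_CSI_CHUNK_CTL_IDX_MASK. One plausible decode is sketched below; the exact semantics of the two fields are defined by the firmware, and the endian conversion from __le16 is omitted, so treat this as an assumption rather than driver behaviour.

#include <stdint.h>

#define CSI_CHUNK_CTL_NUM_MASK	0x3
#define CSI_CHUNK_CTL_IDX_MASK	0xc

/* Illustrative decode of the ctl field of iwl_csi_chunk_notification. */
static void csi_chunk_decode(uint16_t ctl, unsigned int *num, unsigned int *idx)
{
	*num = ctl & CSI_CHUNK_CTL_NUM_MASK;		/* e.g. number of chunks */
	*idx = (ctl & CSI_CHUNK_CTL_IDX_MASK) >> 2;	/* e.g. index of this chunk */
}
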
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 45f61c6af14e..b833b80ea3d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -95,17 +97,36 @@
#define PHY_VHT_CTRL_POS_4_ABOVE (0x7)
/*
+ * struct iwl_fw_channel_info_v1 - channel information
+ *
* @band: PHY_BAND_*
* @channel: channel number
* @width: PHY_[VHT|LEGACY]_CHANNEL_*
* @ctrl channel: PHY_[VHT|LEGACY]_CTRL_*
*/
-struct iwl_fw_channel_info {
+struct iwl_fw_channel_info_v1 {
u8 band;
u8 channel;
u8 width;
u8 ctrl_pos;
-} __packed;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */
+
+/*
+ * struct iwl_fw_channel_info - channel information
+ *
+ * @channel: channel number
+ * @band: PHY_BAND_*
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ * @reserved: for future use and alignment
+ */
+struct iwl_fw_channel_info {
+ __le32 channel;
+ u8 band;
+ u8 width;
+ u8 ctrl_pos;
+ u8 reserved;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_2 */
#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
@@ -134,6 +155,22 @@ struct iwl_fw_channel_info {
/* TODO: complete missing documentation */
/**
+ * struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with
+ * various channel structures.
+ *
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwl_phy_context_cmd_tail {
+ __le32 txchain_info;
+ __le32 rxchain_info;
+ __le32 acquisition_data;
+ __le32 dsp_cfg_flags;
+} __packed;
+
+/**
* struct iwl_phy_context_cmd - config of the PHY context
* ( PHY_CONTEXT_CMD = 0x8 )
* @id_and_color: ID and color of the relevant Binding
@@ -142,10 +179,7 @@ struct iwl_fw_channel_info {
* other value means apply new params after X usecs
* @tx_param_color: ???
* @ci: channel info
- * @txchain_info: ???
- * @rxchain_info: ???
- * @acquisition_data: ???
- * @dsp_cfg_flags: set to 0
+ * @tail: command tail
*/
struct iwl_phy_context_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@@ -155,10 +189,7 @@ struct iwl_phy_context_cmd {
__le32 apply_time;
__le32 tx_param_color;
struct iwl_fw_channel_info ci;
- __le32 txchain_info;
- __le32 rxchain_info;
- __le32 acquisition_data;
- __le32 dsp_cfg_flags;
+ struct iwl_phy_context_cmd_tail tail;
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
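
Note: with the tail split out of iwl_phy_context_cmd (and similarly for the TDLS and ROC commands below), the driver can place the tail right after whichever channel-info layout the firmware expects and size the command accordingly. A rough sketch of that length computation, assuming the v1 info is 4 bytes and the v2 info is 8 bytes as declared above (the helper itself is an assumption, not driver API):

#include <stddef.h>

/* Illustrative: command length when the tail follows a v1 or v2 channel info. */
static size_t phy_ctxt_cmd_len(int fw_uses_new_channel_info,
			       size_t hdr_len, size_t tail_len)
{
	size_t ci_len = fw_uses_new_channel_info ? 8 : 4;

	return hdr_len + ci_len + tail_len;
}
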
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 0791a854fc8f..6e8224ce8906 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -209,8 +209,6 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask
* @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift
- * @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status
- * @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2
*/
enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
@@ -238,8 +236,6 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24,
RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT,
- RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
- RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
};
/* 9000 series API */
@@ -337,6 +333,8 @@ enum iwl_rx_mpdu_phy_info {
IWL_RX_MPDU_PHY_AMPDU = BIT(5),
IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6),
IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7),
+ /* short preamble only applies to CCK; for non-CCK the bit is reused as this flag */
+ IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = BIT(7),
IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8),
};
@@ -723,6 +721,9 @@ struct iwl_rx_mpdu_desc {
#define RX_NO_DATA_FRAME_TIME_POS 0
#define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS)
+#define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000
+#define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000
+
/**
* struct iwl_rx_no_data - RX no data descriptor
* @info: 7:0 frame type, 15:8 RX error type
@@ -743,7 +744,7 @@ struct iwl_rx_no_data {
__le32 fr_time;
__le32 rate;
__le32 phy_info[2];
- __le32 rx_vec[3];
+ __le32 rx_vec[2];
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
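
Note: the two new masks locate the NSTS field in rx_vec[0] of the no-data notification for HE and VHT sounding frames. A hedged decode sketch follows; host order is assumed (le32_to_cpu() omitted), and the usual 802.11 "field encodes NSTS - 1" convention, hence the + 1, is an assumption rather than something stated by this patch.

#include <stdint.h>

#define RX_VEC0_HE_NSTS_MSK	0x03800000u	/* bits 25:23 */
#define RX_VEC0_VHT_NSTS_MSK	0x38000000u	/* bits 29:27 */

/* Illustrative: number of space-time streams from rx_vec[0]. */
static unsigned int rx_vec0_he_nss(uint32_t rx_vec0)
{
	return ((rx_vec0 & RX_VEC0_HE_NSTS_MSK) >> 23) + 1;
}

static unsigned int rx_vec0_vht_nss(uint32_t rx_vec0)
{
	return ((rx_vec0 & RX_VEC0_VHT_NSTS_MSK) >> 27) + 1;
}
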
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 53cb622aa9ab..318843138490 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,6 +30,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -363,14 +365,7 @@ struct mvm_statistics_general_v8 {
u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
-struct mvm_statistics_general_cdb_v9 {
- struct mvm_statistics_general_common_v19 common;
- __le32 beacon_counter[NUM_MAC_INDEX_CDB];
- u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
- u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
-} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */
-
-struct mvm_statistics_general_cdb {
+struct mvm_statistics_general {
struct mvm_statistics_general_common common;
__le32 beacon_counter[MAC_INDEX_AUX];
u8 beacon_average_energy[MAC_INDEX_AUX];
@@ -435,11 +430,11 @@ struct iwl_notif_statistics_v11 {
struct mvm_statistics_load_v1 load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
-struct iwl_notif_statistics_cdb {
+struct iwl_notif_statistics {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
- struct mvm_statistics_general_cdb general;
+ struct mvm_statistics_general general;
struct mvm_statistics_load load_stats;
} __packed; /* STATISTICS_NTFY_API_S_VER_13 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index 7c6c2462d0e8..b089285ac466 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -111,6 +113,17 @@ struct iwl_tdls_channel_switch_frame {
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
/**
+ * struct iwl_tdls_channel_switch_cmd_tail - tail of iwl_tdls_channel_switch_cmd
+ *
+ * @timing: timing related data for command
+ * @frame: channel-switch request/response template, depending on switch_type
+ */
+struct iwl_tdls_channel_switch_cmd_tail {
+ struct iwl_tdls_channel_switch_timing timing;
+ struct iwl_tdls_channel_switch_frame frame;
+} __packed;
+
+/**
* struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
*
* The command is sent to initiate a channel switch and also in response to
@@ -119,15 +132,13 @@ struct iwl_tdls_channel_switch_frame {
* @switch_type: see &enum iwl_tdls_channel_switch_type
* @peer_sta_id: station id of TDLS peer
* @ci: channel we switch to
- * @timing: timing related data for command
- * @frame: channel-switch request/response template, depending to switch_type
+ * @tail: command tail
*/
struct iwl_tdls_channel_switch_cmd {
u8 switch_type;
__le32 peer_sta_id;
struct iwl_fw_channel_info ci;
- struct iwl_tdls_channel_switch_timing timing;
- struct iwl_tdls_channel_switch_frame frame;
+ struct iwl_tdls_channel_switch_cmd_tail tail;
} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index f824bebceb06..4621ef93a2cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -318,6 +320,25 @@ struct iwl_time_event_notif {
} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
/*
+ * struct iwl_hs20_roc_req_tail - tail of iwl_hs20_roc_req
+ *
+ * @node_addr: Our MAC Address
+ * @reserved: reserved for alignment
+ * @apply_time: GP2 value to start (should always be the current GP2 value)
+ * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
+ * time by which start of the event is allowed to be postponed.
+ * @duration: event duration in TU. To calculate event duration:
+ * timeEventDuration = min(duration, remainingQuota)
+ */
+struct iwl_hs20_roc_req_tail {
+ u8 node_addr[ETH_ALEN];
+ __le16 reserved;
+ __le32 apply_time;
+ __le32 apply_time_max_delay;
+ __le32 duration;
+} __packed;
+
+/*
* Aux ROC command
*
* Command requests the firmware to create a time event for a certain duration
@@ -336,13 +357,6 @@ struct iwl_time_event_notif {
* @sta_id_and_color: station id and color, resumed during "Remain On Channel"
* activity.
* @channel_info: channel info
- * @node_addr: Our MAC Address
- * @reserved: reserved for alignment
- * @apply_time: GP2 value to start (should always be the current GP2 value)
- * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
- * time by which start of the event is allowed to be postponed.
- * @duration: event duration in TU To calculate event duration:
- * timeEventDuration = min(duration, remainingQuota)
*/
struct iwl_hs20_roc_req {
/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
@@ -351,11 +365,7 @@ struct iwl_hs20_roc_req {
__le32 event_unique_id;
__le32 sta_id_and_color;
struct iwl_fw_channel_info channel_info;
- u8 node_addr[ETH_ALEN];
- __le16 reserved;
- __le32 apply_time;
- __le32 apply_time_max_delay;
- __le32 duration;
+ struct iwl_hs20_roc_req_tail tail;
} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h
deleted file mode 100644
index 7328a1606146..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h
+++ /dev/null
@@ -1,393 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_fw_api_tof_h__
-#define __iwl_fw_api_tof_h__
-
-/* ToF sub-group command IDs */
-enum iwl_mvm_tof_sub_grp_ids {
- TOF_RANGE_REQ_CMD = 0x1,
- TOF_CONFIG_CMD = 0x2,
- TOF_RANGE_ABORT_CMD = 0x3,
- TOF_RANGE_REQ_EXT_CMD = 0x4,
- TOF_RESPONDER_CONFIG_CMD = 0x5,
- TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
- TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
- TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
- TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
- TOF_RANGE_RESPONSE_NOTIF = 0xFE,
- TOF_MCSI_DEBUG_NOTIF = 0xFB,
-};
-
-/**
- * struct iwl_tof_config_cmd - ToF configuration
- * @tof_disabled: 0 enabled, 1 - disabled
- * @one_sided_disabled: 0 enabled, 1 - disabled
- * @is_debug_mode: 1 debug mode, 0 - otherwise
- * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise
- */
-struct iwl_tof_config_cmd {
- __le32 sub_grp_cmd_id;
- u8 tof_disabled;
- u8 one_sided_disabled;
- u8 is_debug_mode;
- u8 is_buf_required;
-} __packed;
-
-/**
- * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
- * @burst_period: future use: (currently hard coded in the LMAC)
- * The interval between two sequential bursts.
- * @min_delta_ftm: future use: (currently hard coded in the LMAC)
- * The minimum delay between two sequential FTM Responses
- * in the same burst.
- * @burst_duration: future use: (currently hard coded in the LMAC)
- * The total time for all FTMs handshake in the same burst.
- * Affect the time events duration in the LMAC.
- * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
- * The number of bursts for the current ToF request. Affect
- * the number of events allocations in the current iteration.
- * @get_ch_est: for xVT only, NA for driver
- * @abort_responder: when set to '1' - Responder will terminate its activity
- * (all other fields in the command are ignored)
- * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
- * params and use the recomended Initiator params.
- * 0 - otherwise
- * @channel_num: current AP Channel
- * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
- * @rate: current AP rate
- * @ctrl_ch_position: coding of the control channel position relative to
- * the center frequency:
- *
- * 40 MHz
- * 0 below center, 1 above center
- *
- * 80 MHz
- * bits [0..1]
- * * 0 the near 20MHz to the center,
- * * 1 the far 20MHz to the center
- * bit[2]
- * as above 40MHz
- * @ftm_per_burst: FTMs per Burst
- * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
- * '1' - we measure over the Initial FTM Response
- * @asap_mode: ASAP / Non ASAP mode for the current WLS station
- * @sta_id: index of the AP STA when in AP mode
- * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
- * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
- * purposes, simulating station movement by adding various values
- * to this field
- * @bssid: Current AP BSSID
- */
-struct iwl_tof_responder_config_cmd {
- __le32 sub_grp_cmd_id;
- __le16 burst_period;
- u8 min_delta_ftm;
- u8 burst_duration;
- u8 num_of_burst_exp;
- u8 get_ch_est;
- u8 abort_responder;
- u8 recv_sta_req_params;
- u8 channel_num;
- u8 bandwidth;
- u8 rate;
- u8 ctrl_ch_position;
- u8 ftm_per_burst;
- u8 ftm_resp_ts_avail;
- u8 asap_mode;
- u8 sta_id;
- __le16 tsf_timer_offset_msecs;
- __le16 toa_offset;
- u8 bssid[ETH_ALEN];
-} __packed;
-
-/**
- * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
- * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
- * @reserved: reserved
- * @min_delta_ftm: Minimal time between two consecutive measurements,
- * in units of 100us. 0 means no preference by station
- * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
- * value be sent to the AP
- * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
- * value to be sent to the AP
- * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
- * value to be sent to the AP
- */
-struct iwl_tof_range_req_ext_cmd {
- __le32 sub_grp_cmd_id;
- __le16 tsf_timer_offset_msec;
- __le16 reserved;
- u8 min_delta_ftm;
- u8 ftm_format_and_bw20M;
- u8 ftm_format_and_bw40M;
- u8 ftm_format_and_bw80M;
-} __packed;
-
-#define IWL_MVM_TOF_MAX_APS 21
-
-/**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
- * @channel_num: Current AP Channel
- * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
- * @tsf_delta_direction: TSF relatively to the subject AP
- * @ctrl_ch_position: Coding of the control channel position relative to the
- * center frequency.
- * 40MHz 0 below center, 1 above center
- * 80MHz bits [0..1]: 0 the near 20MHz to the center,
- * 1 the far 20MHz to the center
- * bit[2] as above 40MHz
- * @bssid: AP's bss id
- * @measure_type: Measurement type: 0 - two sided, 1 - One sided
- * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the
- * number of measurement iterations (min 2^0 = 1, max 2^14)
- * @burst_period: Recommended value to be sent to the AP. Measurement
- * periodicity In units of 100ms. ignored if num_of_bursts = 0
- * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
- * 1-sided: how many rts/cts pairs should be used per burst.
- * @retries_per_sample: Max number of retries that the LMAC should send
- * in case of no replies by the AP.
- * @tsf_delta: TSF Delta in units of microseconds.
- * The difference between the AP TSF and the device local clock.
- * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
- * Bit[1] Civic should be sent in the FTMR
- * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
- * @enable_dyn_ack: Enable Dynamic ACK BW.
- * 0 Initiator interact with regular AP
- * 1 Initiator interact with Responder machine: need to send the
- * Initiator Acks with HT 40MHz / 80MHz, since the Responder should
- * use it for its ch est measurement (this flag will be set when we
- * configure the opposite machine to be Responder).
- * @rssi: Last received value
- * leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value.
- */
-struct iwl_tof_range_req_ap_entry {
- u8 channel_num;
- u8 bandwidth;
- u8 tsf_delta_direction;
- u8 ctrl_ch_position;
- u8 bssid[ETH_ALEN];
- u8 measure_type;
- u8 num_of_bursts;
- __le16 burst_period;
- u8 samples_per_burst;
- u8 retries_per_sample;
- __le32 tsf_delta;
- u8 location_req;
- u8 asap_mode;
- u8 enable_dyn_ack;
- s8 rssi;
-} __packed;
-
-/**
- * enum iwl_tof_response_mode
- * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
- * possible (not supported for this release)
- * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
- * timeout expiration
- * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
- * earlier of: measurements completion / timeout
- * expiration.
- */
-enum iwl_tof_response_mode {
- IWL_MVM_TOF_RESPOSE_ASAP = 1,
- IWL_MVM_TOF_RESPOSE_TIMEOUT,
- IWL_MVM_TOF_RESPOSE_COMPLETE,
-};
-
-/**
- * struct iwl_tof_range_req_cmd - start measurement cmd
- * @request_id: A Token incremented per request. The same Token will be
- * sent back in the range response
- * @initiator: 0- NW initiated, 1 - Client Initiated
- * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
- * '1' - run ML-Algo for ToF only
- * @req_timeout: Requested timeout of the response in units of 100ms.
- * This is equivalent to the session time configured to the
- * LMAC in Initiator Request
- * @report_policy: Supported partially for this release: For current release -
- * the range report will be uploaded as a batch when ready or
- * when the session is done (successfully / partially).
- * one of iwl_tof_response_mode.
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
- * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
- * '1' Use MAC Address randomization according to the below
- * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
- * Bits set to 1 shall be randomized by the UMAC
- * @ap: per-AP request data
- */
-struct iwl_tof_range_req_cmd {
- __le32 sub_grp_cmd_id;
- u8 request_id;
- u8 initiator;
- u8 one_sided_los_disable;
- u8 req_timeout;
- u8 report_policy;
- u8 los_det_disable;
- u8 num_of_ap;
- u8 macaddr_random;
- u8 macaddr_template[ETH_ALEN];
- u8 macaddr_mask[ETH_ALEN];
- struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
-} __packed;
-
-/**
- * struct iwl_tof_gen_resp_cmd - generic ToF response
- */
-struct iwl_tof_gen_resp_cmd {
- __le32 sub_grp_cmd_id;
- u8 data[];
-} __packed;
-
-/**
- * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
- * @bssid: BSSID of the AP
- * @measure_status: current APs measurement status, one of
- * &enum iwl_tof_entry_status.
- * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
- * @rtt: The Round Trip Time that took for the last measurement for
- * current AP [nSec]
- * @rtt_variance: The Variance of the RTT values measured for current AP
- * @rtt_spread: The Difference between the maximum and the minimum RTT
- * values measured for current AP in the current session [nsec]
- * @rssi: RSSI as uploaded in the Channel Estimation notification
- * @rssi_spread: The Difference between the maximum and the minimum RSSI values
- * measured for current AP in the current session
- * @reserved: reserved
- * @range: Measured range [cm]
- * @range_variance: Measured range variance [cm]
- * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
- * uploaded by the LMAC
- */
-struct iwl_tof_range_rsp_ap_entry_ntfy {
- u8 bssid[ETH_ALEN];
- u8 measure_status;
- u8 measure_bw;
- __le32 rtt;
- __le32 rtt_variance;
- __le32 rtt_spread;
- s8 rssi;
- u8 rssi_spread;
- __le16 reserved;
- __le32 range;
- __le32 range_variance;
- __le32 timestamp;
-} __packed;
-
-/**
- * struct iwl_tof_range_rsp_ntfy -
- * @request_id: A Token ID of the corresponding Range request
- * @request_status: status of current measurement session
- * @last_in_batch: reprot policy (when not all responses are uploaded at once)
- * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
- * @ap: per-AP data
- */
-struct iwl_tof_range_rsp_ntfy {
- u8 request_id;
- u8 request_status;
- u8 last_in_batch;
- u8 num_of_aps;
- struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
-} __packed;
-
-#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
-/**
- * struct iwl_tof_mcsi_notif - used for debug
- * @token: token ID for the current session
- * @role: '0' - initiator, '1' - responder
- * @reserved: reserved
- * @initiator_bssid: initiator machine
- * @responder_bssid: responder machine
- * @mcsi_buffer: debug data
- */
-struct iwl_tof_mcsi_notif {
- u8 token;
- u8 role;
- __le16 reserved;
- u8 initiator_bssid[ETH_ALEN];
- u8 responder_bssid[ETH_ALEN];
- u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
-} __packed;
-
-/**
- * struct iwl_tof_neighbor_report_notif
- * @bssid: BSSID of the AP which sent the report
- * @request_token: same token as the corresponding request
- * @status:
- * @report_ie_len: the length of the response frame starting from the Element ID
- * @data: the IEs
- */
-struct iwl_tof_neighbor_report {
- u8 bssid[ETH_ALEN];
- u8 request_token;
- u8 status;
- __le16 report_ie_len;
- u8 data[];
-} __packed;
-
-/**
- * struct iwl_tof_range_abort_cmd
- * @request_id: corresponds to a range request
- * @reserved: reserved
- */
-struct iwl_tof_range_abort_cmd {
- __le32 sub_grp_cmd_id;
- u8 request_id;
- u8 reserved[3];
-} __packed;
-
-#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 2a19b178c5e8..22efd94da6d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -469,6 +469,93 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a02400, .end = 0x00a02758 },
};
+static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
+ { .start = 0x00a00000, .end = 0x00a00000 },
+ { .start = 0x00a0000c, .end = 0x00a00024 },
+ { .start = 0x00a0002c, .end = 0x00a00034 },
+ { .start = 0x00a0003c, .end = 0x00a0003c },
+ { .start = 0x00a00410, .end = 0x00a00418 },
+ { .start = 0x00a00420, .end = 0x00a00420 },
+ { .start = 0x00a00428, .end = 0x00a00428 },
+ { .start = 0x00a00430, .end = 0x00a0043c },
+ { .start = 0x00a00444, .end = 0x00a00444 },
+ { .start = 0x00a00840, .end = 0x00a00840 },
+ { .start = 0x00a00850, .end = 0x00a00858 },
+ { .start = 0x00a01004, .end = 0x00a01008 },
+ { .start = 0x00a01010, .end = 0x00a01010 },
+ { .start = 0x00a01018, .end = 0x00a01018 },
+ { .start = 0x00a01024, .end = 0x00a01024 },
+ { .start = 0x00a0102c, .end = 0x00a01034 },
+ { .start = 0x00a0103c, .end = 0x00a01040 },
+ { .start = 0x00a01048, .end = 0x00a01050 },
+ { .start = 0x00a01058, .end = 0x00a01058 },
+ { .start = 0x00a01060, .end = 0x00a01070 },
+ { .start = 0x00a0108c, .end = 0x00a0108c },
+ { .start = 0x00a01c20, .end = 0x00a01c28 },
+ { .start = 0x00a01d10, .end = 0x00a01d10 },
+ { .start = 0x00a01e28, .end = 0x00a01e2c },
+ { .start = 0x00a01e60, .end = 0x00a01e60 },
+ { .start = 0x00a01e80, .end = 0x00a01e80 },
+ { .start = 0x00a01ea0, .end = 0x00a01ea0 },
+ { .start = 0x00a02000, .end = 0x00a0201c },
+ { .start = 0x00a02024, .end = 0x00a02024 },
+ { .start = 0x00a02040, .end = 0x00a02048 },
+ { .start = 0x00a020c0, .end = 0x00a020e0 },
+ { .start = 0x00a02400, .end = 0x00a02404 },
+ { .start = 0x00a0240c, .end = 0x00a02414 },
+ { .start = 0x00a0241c, .end = 0x00a0243c },
+ { .start = 0x00a02448, .end = 0x00a024bc },
+ { .start = 0x00a024c4, .end = 0x00a024cc },
+ { .start = 0x00a02508, .end = 0x00a02508 },
+ { .start = 0x00a02510, .end = 0x00a02514 },
+ { .start = 0x00a0251c, .end = 0x00a0251c },
+ { .start = 0x00a0252c, .end = 0x00a0255c },
+ { .start = 0x00a02564, .end = 0x00a025a0 },
+ { .start = 0x00a025a8, .end = 0x00a025b4 },
+ { .start = 0x00a025c0, .end = 0x00a025c0 },
+ { .start = 0x00a025e8, .end = 0x00a025f4 },
+ { .start = 0x00a02c08, .end = 0x00a02c18 },
+ { .start = 0x00a02c2c, .end = 0x00a02c38 },
+ { .start = 0x00a02c68, .end = 0x00a02c78 },
+ { .start = 0x00a03000, .end = 0x00a03000 },
+ { .start = 0x00a03010, .end = 0x00a03014 },
+ { .start = 0x00a0301c, .end = 0x00a0302c },
+ { .start = 0x00a03034, .end = 0x00a03038 },
+ { .start = 0x00a03040, .end = 0x00a03044 },
+ { .start = 0x00a03060, .end = 0x00a03068 },
+ { .start = 0x00a03070, .end = 0x00a03070 },
+ { .start = 0x00a0307c, .end = 0x00a03084 },
+ { .start = 0x00a0308c, .end = 0x00a03090 },
+ { .start = 0x00a03098, .end = 0x00a03098 },
+ { .start = 0x00a030a0, .end = 0x00a030a0 },
+ { .start = 0x00a030a8, .end = 0x00a030b4 },
+ { .start = 0x00a030bc, .end = 0x00a030c0 },
+ { .start = 0x00a030c8, .end = 0x00a030f4 },
+ { .start = 0x00a03100, .end = 0x00a0312c },
+ { .start = 0x00a03c00, .end = 0x00a03c5c },
+ { .start = 0x00a04400, .end = 0x00a04454 },
+ { .start = 0x00a04460, .end = 0x00a04474 },
+ { .start = 0x00a044c0, .end = 0x00a044ec },
+ { .start = 0x00a04500, .end = 0x00a04504 },
+ { .start = 0x00a04510, .end = 0x00a04538 },
+ { .start = 0x00a04540, .end = 0x00a04548 },
+ { .start = 0x00a04560, .end = 0x00a04560 },
+ { .start = 0x00a04570, .end = 0x00a0457c },
+ { .start = 0x00a04590, .end = 0x00a04590 },
+ { .start = 0x00a04598, .end = 0x00a04598 },
+ { .start = 0x00a045c0, .end = 0x00a045f4 },
+ { .start = 0x00a0c000, .end = 0x00a0c018 },
+ { .start = 0x00a0c020, .end = 0x00a0c028 },
+ { .start = 0x00a0c038, .end = 0x00a0c094 },
+ { .start = 0x00a0c0c0, .end = 0x00a0c104 },
+ { .start = 0x00a0c10c, .end = 0x00a0c118 },
+ { .start = 0x00a0c150, .end = 0x00a0c174 },
+ { .start = 0x00a0c17c, .end = 0x00a0c188 },
+ { .start = 0x00a0c190, .end = 0x00a0c198 },
+ { .start = 0x00a0c1a0, .end = 0x00a0c1a8 },
+ { .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
+};
+
static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
u32 len_bytes, __le32 *data)
{
@@ -478,15 +565,20 @@ static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
}
-static void iwl_dump_prph(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data,
+static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
const struct iwl_prph_range *iwl_prph_dump_addr,
- u32 range_len)
+ u32 range_len, void *ptr)
{
struct iwl_fw_error_dump_prph *prph;
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_fw_error_dump_data **data =
+ (struct iwl_fw_error_dump_data **)ptr;
unsigned long flags;
u32 i;
+ if (!data)
+ return;
+
IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
if (!iwl_trans_grab_nic_access(trans, &flags))
@@ -552,37 +644,47 @@ static struct scatterlist *alloc_sgtable(int size)
return table;
}
-static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
+static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
+ const struct iwl_prph_range *iwl_prph_dump_addr,
+ u32 range_len, void *ptr)
{
- u32 prph_len = 0;
- int i;
+ u32 *prph_len = (u32 *)ptr;
+ int i, num_bytes_in_chunk;
+
+ if (!prph_len)
+ return;
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
- i++) {
+ for (i = 0; i < range_len; i++) {
/* The range includes both boundaries */
- int num_bytes_in_chunk =
- iwl_prph_dump_addr_comm[i].end -
- iwl_prph_dump_addr_comm[i].start + 4;
+ num_bytes_in_chunk =
+ iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4;
- prph_len += sizeof(struct iwl_fw_error_dump_data) +
+ *prph_len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_prph) +
num_bytes_in_chunk;
}
+}
- if (fwrt->trans->cfg->mq_rx_supported) {
- for (i = 0; i <
- ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
- /* The range includes both boundaries */
- int num_bytes_in_chunk =
- iwl_prph_dump_addr_9000[i].end -
- iwl_prph_dump_addr_9000[i].start + 4;
-
- prph_len += sizeof(struct iwl_fw_error_dump_data) +
- sizeof(struct iwl_fw_error_dump_prph) +
- num_bytes_in_chunk;
+static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
+ void (*handler)(struct iwl_fw_runtime *,
+ const struct iwl_prph_range *,
+ u32, void *))
+{
+ u32 range_len;
+
+ if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
+ handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
+ } else {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
+ handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
+
+ if (fwrt->trans->cfg->mq_rx_supported) {
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
+ handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
}
}
- return prph_len;
}
static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
@@ -646,6 +748,9 @@ static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
/* Count RXF1 sizes */
+ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
+ mem_cfg->num_lmacs = MAX_NUM_LMAC;
+
for (i = 0; i < mem_cfg->num_lmacs; i++)
ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
@@ -664,6 +769,9 @@ static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
goto dump_internal_txf;
/* Count TXF sizes */
+ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
+ mem_cfg->num_lmacs = MAX_NUM_LMAC;
+
for (i = 0; i < mem_cfg->num_lmacs; i++) {
int j;
@@ -733,6 +841,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
+ if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
+ return NULL;
img = &fwrt->fw->img[fwrt->cur_fw_img];
sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
@@ -747,9 +857,9 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
/* Make room for PRPH registers */
- if (!fwrt->trans->cfg->gen2 &&
- iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
- prph_len += iwl_fw_get_prph_len(fwrt);
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
+ iwl_fw_prph_handler(fwrt, &prph_len,
+ iwl_fw_get_prph_len);
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
@@ -828,7 +938,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
sizeof(dump_info->dev_human_readable) - 1);
strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable) - 1);
- dump_info->rt_status = cpu_to_le32(fwrt->dump.rt_status);
+ dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
+ dump_info->lmac_err_id[0] =
+ cpu_to_le32(fwrt->dump.lmac_err_id[0]);
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ dump_info->lmac_err_id[1] =
+ cpu_to_le32(fwrt->dump.lmac_err_id[1]);
+ dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);
dump_data = iwl_fw_error_next_data(dump_data);
}
@@ -935,16 +1051,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (iwl_fw_dbg_is_paging_enabled(fwrt))
iwl_dump_paging(fwrt, &dump_data);
- if (prph_len) {
- iwl_dump_prph(fwrt->trans, &dump_data,
- iwl_prph_dump_addr_comm,
- ARRAY_SIZE(iwl_prph_dump_addr_comm));
-
- if (fwrt->trans->cfg->mq_rx_supported)
- iwl_dump_prph(fwrt->trans, &dump_data,
- iwl_prph_dump_addr_9000,
- ARRAY_SIZE(iwl_prph_dump_addr_9000));
- }
+ if (prph_len)
+ iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph);
out:
dump_file->file_len = cpu_to_le32(file_len);
@@ -1108,13 +1216,13 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
iwl_dump_prph_ini(fwrt->trans, data, reg);
break;
case IWL_FW_INI_REGION_DRAM_BUFFER:
- *dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR;
+ *dump_mask |= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);
break;
case IWL_FW_INI_REGION_PAGING:
if (iwl_fw_dbg_is_paging_enabled(fwrt))
iwl_dump_paging(fwrt, data);
else
- *dump_mask |= IWL_FW_ERROR_DUMP_PAGING;
+ *dump_mask |= BIT(IWL_FW_ERROR_DUMP_PAGING);
break;
case IWL_FW_INI_REGION_TXF:
iwl_fw_dump_txf(fwrt, data);
@@ -1146,10 +1254,6 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
if (id == FW_DBG_TRIGGER_FW_ASSERT)
id = IWL_FW_TRIGGER_ID_FW_ASSERT;
- else if (id == FW_DBG_TRIGGER_USER)
- id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
- else if (id < FW_DBG_TRIGGER_MAX)
- return NULL;
if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
return NULL;
@@ -1385,7 +1489,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
if (WARN_ON(!fwrt->dump.active_trigs[id].active))
return -EINVAL;
- delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec);
+ delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->dump_delay);
occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences);
if (!occur)
return 0;
@@ -1570,27 +1674,29 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_allocation_tlv *alloc)
{
struct iwl_trans *trans = fwrt->trans;
- struct iwl_continuous_record_cmd cont_rec = {};
- struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0];
+ struct iwl_ldbg_config_cmd ldbg_cmd = {
+ .type = cpu_to_le32(BUFFER_ALLOCATION),
+ };
+ struct iwl_buffer_allocation_cmd *cmd = &ldbg_cmd.buffer_allocation;
struct iwl_host_cmd hcmd = {
.id = LDBG_CONFIG_CMD,
.flags = CMD_ASYNC,
- .data[0] = &cont_rec,
- .len[0] = sizeof(cont_rec),
+ .data[0] = &ldbg_cmd,
+ .len[0] = sizeof(ldbg_cmd),
};
void *virtual_addr = NULL;
u32 size = le32_to_cpu(alloc->size);
dma_addr_t phys_addr;
- cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION);
-
if (!trans->num_blocks &&
le32_to_cpu(alloc->buffer_location) !=
IWL_FW_INI_LOCATION_DRAM_PATH)
return;
- virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size,
- &phys_addr, GFP_KERNEL);
+ virtual_addr =
+ dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO |
+ __GFP_COMP);
/* TODO: alloc fragments if needed */
if (!virtual_addr)
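
The dbg.c hunks above fold the two open-coded PRPH loops (one sizing the dump, one filling it) into a single family-aware selector, iwl_fw_prph_handler(), that hands the chosen address table to a callback. A standalone sketch of that "one selector, two handlers" shape, with simplified made-up types and names (struct range, for_each_table and friends are illustrative, not the driver's):

	#include <stddef.h>
	#include <stdio.h>

	struct range { unsigned start, end; };		/* stand-in for iwl_prph_range */

	static const struct range table_comm[]  = { { 0x00, 0x0c }, { 0x20, 0x24 } };
	static const struct range table_22000[] = { { 0x00, 0x3c } };

	/* a handler gets one address table plus an opaque context, mirroring how
	 * iwl_fw_get_prph_len() and iwl_dump_prph() are passed in the patch */
	typedef void (*range_handler)(const struct range *tbl, size_t n, void *ctx);

	static void calc_len(const struct range *tbl, size_t n, void *ctx)
	{
		size_t *len = ctx;

		for (size_t i = 0; i < n; i++)
			*len += tbl[i].end - tbl[i].start + 4;	/* both bounds included */
	}

	static void dump(const struct range *tbl, size_t n, void *ctx)
	{
		(void)ctx;
		for (size_t i = 0; i < n; i++)
			printf("dump 0x%x..0x%x\n", tbl[i].start, tbl[i].end);
	}

	/* the one place that knows which table belongs to which device family */
	static void for_each_table(int is_22000_family, void *ctx, range_handler fn)
	{
		if (is_22000_family)
			fn(table_22000, sizeof(table_22000) / sizeof(table_22000[0]), ctx);
		else
			fn(table_comm, sizeof(table_comm) / sizeof(table_comm[0]), ctx);
	}

	int main(void)
	{
		size_t len = 0;

		for_each_table(1, &len, calc_len);	/* first pass: size the buffer */
		for_each_table(1, NULL, dump);		/* second pass: emit the data */
		printf("total payload: %zu bytes\n", len);
		return 0;
	}

Keeping the family check in one function is what lets the 22000 table be added without touching either the length or the dump pass.
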
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 6aabbdd72326..330229d2a61d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -102,7 +102,10 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
- fwrt->dump.rt_status = 0;
+ fwrt->dump.lmac_err_id[0] = 0;
+ if (fwrt->smem_cfg.num_lmacs > 1)
+ fwrt->dump.lmac_err_id[1] = 0;
+ fwrt->dump.umac_err_id = 0;
}
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
@@ -266,20 +269,20 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
-static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
+static inline int
+iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
{
- struct iwl_continuous_record_cmd cont_rec = {};
+ struct iwl_ldbg_config_cmd cmd = {
+ .type = start ? cpu_to_le32(START_DEBUG_RECORDING) :
+ cpu_to_le32(STOP_DEBUG_RECORDING),
+ };
struct iwl_host_cmd hcmd = {
.id = LDBG_CONFIG_CMD,
.flags = CMD_ASYNC,
- .data[0] = &cont_rec,
- .len[0] = sizeof(cont_rec),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
};
- cont_rec.record_mode.enable_recording = start ?
- cpu_to_le16(START_DEBUG_RECORDING) :
- cpu_to_le16(STOP_DEBUG_RECORDING);
-
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
@@ -378,6 +381,7 @@ static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
{
return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
+ fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block;
}
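
The LDBG_CONFIG_CMD users above switch from stuffing a buffer-allocation command into the padding of iwl_continuous_record_cmd to an explicit iwl_ldbg_config_cmd with a 32-bit type selector (START_DEBUG_RECORDING, STOP_DEBUG_RECORDING, BUFFER_ALLOCATION). A minimal model of that "type + union payload" layout, with simplified field types and names of its own; this illustrates the pattern, not the firmware ABI:

	#include <stdint.h>
	#include <stdio.h>

	/* illustrative model only: a 32-bit type selector followed by a payload
	 * union, instead of overloading a record-mode field plus padding */
	enum ldbg_type { START_RECORDING, STOP_RECORDING, ALLOC_BUFFER };

	struct buffer_allocation { uint64_t addr; uint32_t size; };

	struct ldbg_config {
		uint32_t type;
		union {
			struct buffer_allocation buffer_allocation;
		} u;
	};

	static void send_ldbg(const struct ldbg_config *cmd)
	{
		printf("ldbg config cmd, type %u\n", (unsigned)cmd->type);
	}

	int main(void)
	{
		struct ldbg_config start = { .type = START_RECORDING };
		struct ldbg_config alloc = {
			.type = ALLOC_BUFFER,
			.u.buffer_allocation = { .addr = 0x1000, .size = 4096 },
		};

		send_ldbg(&start);
		send_ldbg(&alloc);
		return 0;
	}
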
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 65faecf552cd..c02425a1e64f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -180,6 +180,8 @@ enum iwl_fw_error_dump_family {
IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
};
+#define MAX_NUM_LMAC 2
+
/**
* struct iwl_fw_error_dump_info - info on the device / firmware
* @device_family: the family of the device (7 / 8)
@@ -187,7 +189,10 @@ enum iwl_fw_error_dump_family {
* @fw_human_readable: human readable FW version
* @dev_human_readable: name of the device
* @bus_human_readable: name of the bus used
- * @rt_status: the error_id/rt_status that that triggered the latest dump
+ * @num_of_lmacs: the number of lmacs
+ * @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest dump
+ * if the dump collection was not initiated by an assert, the value is 0
+ * @umac_err_id: the umac error_id/rt_status that triggered the latest dump
* if the dump collection was not initiated by an assert, the value is 0
*/
struct iwl_fw_error_dump_info {
@@ -196,7 +201,9 @@ struct iwl_fw_error_dump_info {
u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
u8 dev_human_readable[64];
u8 bus_human_readable[8];
- __le32 rt_status;
+ u8 num_of_lmacs;
+ __le32 umac_err_id;
+ __le32 lmac_err_id[MAX_NUM_LMAC];
} __packed;
/**
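
Since the info block now carries one error ID per LMAC plus one for the UMAC, a dump consumer has up to MAX_NUM_LMAC + 1 codes to report instead of a single rt_status. A minimal host-side sketch that mirrors just the fields added above, assuming a little-endian dump and ignoring the packing/endianness handling a real parser would need:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_NUM_LMAC 2

	/* simplified mirror of the fields added to iwl_fw_error_dump_info above;
	 * the real struct is __packed and uses __le32, which is ignored here */
	struct dump_err_ids {
		uint8_t  num_of_lmacs;
		uint32_t umac_err_id;
		uint32_t lmac_err_id[MAX_NUM_LMAC];
	};

	static void print_err_ids(const struct dump_err_ids *info)
	{
		/* 0 means the dump was not triggered by a firmware assert */
		printf("umac error id: 0x%08x\n", (unsigned)info->umac_err_id);
		for (unsigned i = 0; i < info->num_of_lmacs && i < MAX_NUM_LMAC; i++)
			printf("lmac%u error id: 0x%08x\n", i,
			       (unsigned)info->lmac_err_id[i]);
	}

	int main(void)
	{
		struct dump_err_ids example = {
			.num_of_lmacs = 2,
			.umac_err_id  = 0x20103500,	/* illustrative values only */
			.lmac_err_id  = { 0x00000071, 0 },
		};

		print_err_ids(&example);
		return 0;
	}
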
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 81f557c0b58d..e8b00b795cbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -303,7 +303,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
* @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
* @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
- * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
* @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
* @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
* tx power value into TPC Report action frame and Link Measurement Report
@@ -334,6 +333,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
* @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2
+ * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
+ * (6 GHz).
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -357,10 +358,13 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
* command size (command version 4) that supports toggling ACK TX
* power reduction.
- * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
* @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
* @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax
* capability.
+ * @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured
+ * to report the CSI information with (certain) RX frames
+ *
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -369,7 +373,6 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
- IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5,
IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
@@ -394,6 +397,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45,
+ IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
@@ -412,6 +416,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
+ IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90,
+
IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA
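
The numeric values in this enum (48 for ULTRA_HB_CHANNELS, 90 for CSI_REPORTING) are bit positions in the firmware's advertised capability bitmap, which is why the numbering can jump; in the driver the check is done with fw_has_capa(), as in the debugfs code later in this patch. A toy sketch of the index-to-word/bit mapping, using a plain uint32_t array rather than the driver's internal storage:

	#include <stdint.h>
	#include <stdio.h>

	#define CAPA_ULTRA_HB_CHANNELS 48
	#define CAPA_CSI_REPORTING     90

	/* toy capability bitmap: index N lives in word N/32, bit N%32 */
	static int capa_test(const uint32_t *bitmap, unsigned idx)
	{
		return (bitmap[idx / 32] >> (idx % 32)) & 1;
	}

	static void capa_set(uint32_t *bitmap, unsigned idx)
	{
		bitmap[idx / 32] |= 1u << (idx % 32);
	}

	int main(void)
	{
		uint32_t capa[4] = { 0 };	/* enough words for indices up to 127 */

		capa_set(capa, CAPA_CSI_REPORTING);

		printf("ultra HB: %d\n", capa_test(capa, CAPA_ULTRA_HB_CHANNELS)); /* 0 */
		printf("CSI reporting: %d\n", capa_test(capa, CAPA_CSI_REPORTING)); /* 1 */
		return 0;
	}
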
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 4f7090f88cb0..a0fcbb28114b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -142,7 +142,8 @@ struct iwl_fw_runtime {
u32 *d3_debug_data;
struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID];
struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
- u32 rt_status;
+ u32 lmac_err_id[MAX_NUM_LMAC];
+ u32 umac_err_id;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 91861a9cbe57..ff942532fac3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -335,10 +335,6 @@ struct iwl_csr_params {
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
- * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
- * (if supported)
- * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
- * next step. Supported only in integrated solutions.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section (only DVM)
@@ -392,8 +388,6 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
- const char *fw_name_pre_b_or_c_step;
- const char *fw_name_pre_rf_next_step;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
@@ -551,29 +545,40 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260_2ac_160_cfg;
extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+extern const struct iwl_cfg iwl22560_2ax_cfg_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
+extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
+extern const struct iwl_cfg killer1650x_2ax_cfg;
+extern const struct iwl_cfg killer1650w_2ax_cfg;
extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index caa5806acd81..42af421bbc3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -325,6 +325,7 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
+#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
/* RF_ID value */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index bf1be985f36b..2efa1dfe9b4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -210,18 +210,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const struct iwl_cfg *cfg = drv->trans->cfg;
char tag[8];
- const char *fw_pre_name;
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
- CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
- fw_pre_name = cfg->fw_name_pre_b_or_c_step;
- else if (drv->trans->cfg->integrated &&
- CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
- cfg->fw_name_pre_rf_next_step)
- fw_pre_name = cfg->fw_name_pre_rf_next_step;
- else
- fw_pre_name = cfg->fw_name_pre;
+ (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP &&
+ CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) {
+ IWL_ERR(drv,
+ "Only HW steps B and C are currently supported (0x%0x)\n",
+ drv->trans->hw_rev);
+ return -EINVAL;
+ }
if (first) {
drv->fw_index = cfg->ucode_api_max;
@@ -235,15 +232,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
IWL_ERR(drv, "no suitable firmware found!\n");
if (cfg->ucode_api_min == cfg->ucode_api_max) {
- IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
+ IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre,
cfg->ucode_api_max);
} else {
IWL_ERR(drv, "minimum version required: %s%d\n",
- fw_pre_name,
- cfg->ucode_api_min);
+ cfg->fw_name_pre, cfg->ucode_api_min);
IWL_ERR(drv, "maximum version supported: %s%d\n",
- fw_pre_name,
- cfg->ucode_api_max);
+ cfg->fw_name_pre, cfg->ucode_api_max);
}
IWL_ERR(drv,
@@ -252,7 +247,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
}
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
- fw_pre_name, tag);
+ cfg->fw_name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
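
With the step-specific prefixes gone, the requested firmware name is always cfg->fw_name_pre followed by the API version and ".ucode", and the driver walks the version down from ucode_api_max to ucode_api_min until a file is found. A standalone sketch of that naming/fallback loop, with a made-up prefix and a stubbed availability check standing in for request_firmware():

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define API_MAX 48
	#define API_MIN 43

	/* stand-in for request_firmware(): pretend only the -46 image is installed */
	static bool firmware_exists(const char *name)
	{
		return strstr(name, "-46.ucode") != NULL;
	}

	int main(void)
	{
		const char *fw_name_pre = "iwlwifi-example-";	/* hypothetical prefix */
		char name[64];

		for (int api = API_MAX; api >= API_MIN; api--) {
			snprintf(name, sizeof(name), "%s%d.ucode", fw_name_pre, api);
			if (firmware_exists(name)) {
				printf("loading %s\n", name);
				return 0;
			}
			printf("%s not found, trying older API\n", name);
		}
		printf("no suitable firmware found (min %s%d.ucode)\n",
		       fw_name_pre, API_MIN);
		return 1;
	}
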
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 4f10914f6048..ffd1e649bfa0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -1,10 +1,13 @@
/******************************************************************************
*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
- * Portions of this file are derived from the ipw3945 project.
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -15,12 +18,44 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 38085850a2d3..61477e58352d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -1,8 +1,9 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
*
- * Portions of this file are derived from the ipw3945 project.
+ * GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -14,14 +15,43 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
-
#ifndef __iwl_io_h__
#define __iwl_io_h__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 73b1c46f1158..0cae2ef9b9df 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -152,4 +152,22 @@ struct iwl_mod_params {
bool enable_ini;
};
+static inline bool iwl_enable_rx_ampdu(void)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(void)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* enabled by default */
+ return true;
+}
+
#endif /* #__iwl_modparams_h__ */
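
These two inline helpers centralize how the disable_11n module parameter gates RX and TX A-MPDU aggregation. A standalone sketch of the same decision logic, using illustrative bit values (the real IWL_DISABLE_HT_* / IWL_ENABLE_HT_TXAGG definitions are not reproduced here):

	#include <stdbool.h>
	#include <stdio.h>

	/* illustrative bit assignments, not the driver's real values */
	#define DISABLE_HT_TXAGG (1u << 1)
	#define DISABLE_HT_RXAGG (1u << 2)
	#define ENABLE_HT_TXAGG  (1u << 3)

	static unsigned disable_11n;	/* models iwlwifi_mod_params.disable_11n */

	static bool rx_ampdu_enabled(void)
	{
		return !(disable_11n & DISABLE_HT_RXAGG);
	}

	static bool tx_ampdu_enabled(void)
	{
		if (disable_11n & DISABLE_HT_TXAGG)
			return false;
		/* enabled by default, and ENABLE_HT_TXAGG keeps it that way */
		return true;
	}

	int main(void)
	{
		disable_11n = DISABLE_HT_RXAGG;		/* e.g. modprobe ... disable_11n=0x4 */
		printf("rx ampdu: %d, tx ampdu: %d\n",
		       rx_ampdu_enabled(), tx_ampdu_enabled());
		return 0;
	}
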
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index d9afedc3d1d9..484ef4556953 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -569,8 +569,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE |
- IEEE80211_HE_MAC_CAP0_TWT_RES,
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
@@ -1196,14 +1195,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd_to_copy = sizeof(struct ieee80211_regdomain) +
valid_rules * sizeof(struct ieee80211_reg_rule);
- copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
+ copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
if (!copy_rd) {
copy_rd = ERR_PTR(-ENOMEM);
goto out;
}
- memcpy(copy_rd, regd, regd_to_copy);
-
out:
kfree(regdb_ptrs);
kfree(regd);
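
This hunk is a pure simplification: kmemdup(src, len, gfp) allocates len bytes and copies src in one step, so the separate kzalloc() + memcpy() pair (including the zeroing that the copy immediately overwrote) is redundant. A userspace model of the helper, for illustration only:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* userspace model of kmemdup(): allocate len bytes and copy src into them */
	static void *memdup(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}

	int main(void)
	{
		const char rule[] = "example regdomain rule";
		char *copy = memdup(rule, sizeof(rule));

		if (copy)
			printf("%s\n", copy);
		free(copy);
		return 0;
	}
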
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 9d89b7d7f9fa..3aaa5f06461c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -358,12 +358,12 @@
/* FW monitor */
#define MON_BUFF_SAMPLE_CTL (0xa03c00)
-#define MON_BUFF_BASE_ADDR (0xa03c3c)
+#define MON_BUFF_BASE_ADDR (0xa03c1c)
#define MON_BUFF_END_ADDR (0xa03c40)
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
/* FW monitor family 8000 and on */
-#define MON_BUFF_BASE_ADDR_VER2 (0xa03c3c)
+#define MON_BUFF_BASE_ADDR_VER2 (0xa03c1c)
#define MON_BUFF_END_ADDR_VER2 (0xa03c20)
#define MON_BUFF_WRPTR_VER2 (0xa03c24)
#define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 9ffd21918b5a..30cbd981efbd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -7,7 +7,6 @@ iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
-iwlmvm-y += tof.o
iwlmvm-$(CONFIG_PM) += d3.o
-ccflags-y += -I$(src)/../
+ccflags-y += -I $(srctree)/$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 01b5338201d6..36ed7d6fc971 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2125,7 +2125,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
file->private_data = inode->i_private;
- ieee80211_stop_queues(mvm->hw);
synchronize_net();
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
@@ -2140,10 +2139,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
rtnl_unlock();
if (err > 0)
err = -EINVAL;
- if (err) {
- ieee80211_wake_queues(mvm->hw);
+ if (err)
return err;
- }
+
mvm->d3_test_active = true;
mvm->keep_vif = NULL;
return 0;
@@ -2223,8 +2221,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
- ieee80211_wake_queues(mvm->hw);
-
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 33b0af24a537..2453ceabf00d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -60,7 +60,6 @@
*
*****************************************************************************/
#include "mvm.h"
-#include "fw/api/tof.h"
#include "debugfs.h"
static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -523,753 +522,30 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
- char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int ret = -EINVAL;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("tof_disabled=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.tof_cfg.tof_disabled = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.tof_cfg.one_sided_disabled = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("is_debug_mode=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.tof_cfg.is_debug_mode = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("is_buf=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.tof_cfg.is_buf_required = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value) {
- ret = iwl_mvm_tof_config_cmd(mvm);
- goto out;
- }
- }
-
-out:
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_tof_config_cmd *cmd;
-
- cmd = &mvm->tof_data.tof_cfg;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
- cmd->tof_disabled);
- pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
- cmd->one_sided_disabled);
- pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
- cmd->is_debug_mode);
- pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
- cmd->is_buf_required);
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
- char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int ret = 0;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("burst_period=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (!ret)
- mvm->tof_data.responder_cfg.burst_period =
- cpu_to_le16(value);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.min_delta_ftm = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("burst_duration=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.burst_duration = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.num_of_burst_exp = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("abort_responder=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.abort_responder = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("get_ch_est=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.get_ch_est = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.recv_sta_req_params = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("channel_num=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.channel_num = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("bandwidth=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.bandwidth = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("rate=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.rate = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("bssid=", buf);
- if (data) {
- u8 *mac = mvm->tof_data.responder_cfg.bssid;
-
- if (!mac_pton(data, mac)) {
- ret = -EINVAL;
- goto out;
- }
- }
-
- data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
- cpu_to_le16(value);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("toa_offset=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.toa_offset =
- cpu_to_le16(value);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("center_freq=", buf);
- if (data) {
- struct iwl_tof_responder_config_cmd *cmd =
- &mvm->tof_data.responder_cfg;
-
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value) {
- enum nl80211_band band = (cmd->channel_num <= 14) ?
- NL80211_BAND_2GHZ :
- NL80211_BAND_5GHZ;
- struct ieee80211_channel chn = {
- .band = band,
- .center_freq = ieee80211_channel_to_frequency(
- cmd->channel_num, band),
- };
- struct cfg80211_chan_def chandef = {
- .chan = &chn,
- .center_freq1 =
- ieee80211_channel_to_frequency(value,
- band),
- };
-
- cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
- }
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.ftm_per_burst = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("asap_mode=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.responder_cfg.asap_mode = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value) {
- ret = iwl_mvm_tof_responder_cmd(mvm, vif);
- goto out;
- }
- }
-
-out:
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_tof_responder_config_cmd *cmd;
-
- cmd = &mvm->tof_data.responder_cfg;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
- le16_to_cpu(cmd->burst_period));
- pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
- cmd->burst_duration);
- pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
- cmd->bandwidth);
- pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
- cmd->channel_num);
- pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
- cmd->ctrl_ch_position);
- pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
- cmd->bssid);
- pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
- cmd->min_delta_ftm);
- pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
- cmd->num_of_burst_exp);
- pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
- pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
- cmd->abort_responder);
- pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
- cmd->get_ch_est);
- pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
- cmd->recv_sta_req_params);
- pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
- cmd->ftm_per_burst);
- pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
- cmd->ftm_resp_ts_avail);
- pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
- cmd->asap_mode);
- pos += scnprintf(buf + pos, bufsz - pos,
- "tsf_timer_offset_msecs = %d\n",
- le16_to_cpu(cmd->tsf_timer_offset_msecs));
- pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
- le16_to_cpu(cmd->toa_offset));
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int ret = 0;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("request_id=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.request_id = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("initiator=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.initiator = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.one_sided_los_disable = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("req_timeout=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.req_timeout = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("report_policy=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.report_policy = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("macaddr_random=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.macaddr_random = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("num_of_ap=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req.num_of_ap = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("macaddr_template=", buf);
- if (data) {
- u8 mac[ETH_ALEN];
-
- if (!mac_pton(data, mac)) {
- ret = -EINVAL;
- goto out;
- }
- memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("macaddr_mask=", buf);
- if (data) {
- u8 mac[ETH_ALEN];
-
- if (!mac_pton(data, mac)) {
- ret = -EINVAL;
- goto out;
- }
- memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ap=", buf);
- if (data) {
- struct iwl_tof_range_req_ap_entry ap = {};
- int size = sizeof(struct iwl_tof_range_req_ap_entry);
- u16 burst_period;
- u8 *mac = ap.bssid;
- unsigned int i;
-
- if (sscanf(data, "%u %hhd %hhd %hhd"
- "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
- "%hhd %hhd %hd"
- "%hhd %hhd %d"
- "%hhx %hhd %hhd %hhd",
- &i, &ap.channel_num, &ap.bandwidth,
- &ap.ctrl_ch_position,
- mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
- &ap.measure_type, &ap.num_of_bursts,
- &burst_period,
- &ap.samples_per_burst, &ap.retries_per_sample,
- &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
- &ap.enable_dyn_ack, &ap.rssi) != 20) {
- ret = -EINVAL;
- goto out;
- }
- if (i >= IWL_MVM_TOF_MAX_APS) {
- IWL_ERR(mvm, "Invalid AP index %d\n", i);
- ret = -EINVAL;
- goto out;
- }
-
- ap.burst_period = cpu_to_le16(burst_period);
-
- memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_range_request=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value)
- ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
- goto out;
- }
-
- ret = -EINVAL;
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[512];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_tof_range_req_cmd *cmd;
- int i;
-
- cmd = &mvm->tof_data.range_req;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
- cmd->request_id);
- pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
- cmd->initiator);
- pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
- cmd->one_sided_los_disable);
- pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
- cmd->req_timeout);
- pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
- cmd->report_policy);
- pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
- cmd->macaddr_random);
- pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
- cmd->macaddr_template);
- pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
- cmd->macaddr_mask);
- pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
- cmd->num_of_ap);
- for (i = 0; i < cmd->num_of_ap; i++) {
- struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "ap %.2d: channel_num=%hhd bw=%hhd"
- " control=%hhd bssid=%pM type=%hhd"
- " num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
- " retries=%hhd tsf_delta=%d"
- " tsf_delta_direction=%hhd location_req=0x%hhx "
- " asap=%hhd enable=%hhd rssi=%hhd\n",
- i, ap->channel_num, ap->bandwidth,
- ap->ctrl_ch_position, ap->bssid,
- ap->measure_type, ap->num_of_bursts,
- ap->burst_period, ap->samples_per_burst,
- ap->retries_per_sample, ap->tsf_delta,
- ap->tsf_delta_direction,
- ap->location_req, ap->asap_mode,
- ap->enable_dyn_ack, ap->rssi);
- }
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
- char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int ret = 0;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
- cpu_to_le16(value);
- goto out;
- }
-
- data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.min_delta_ftm = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
- value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
- value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
- value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value)
- ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
- goto out;
- }
-
- ret = -EINVAL;
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- struct iwl_tof_range_req_ext_cmd *cmd;
-
- cmd = &mvm->tof_data.range_req_ext;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "tsf_timer_offset_msec = %hd\n",
- cmd->tsf_timer_offset_msec);
- pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
- cmd->min_delta_ftm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ftm_format_and_bw20M = %hhd\n",
- cmd->ftm_format_and_bw20M);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ftm_format_and_bw40M = %hhd\n",
- cmd->ftm_format_and_bw40M);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ftm_format_and_bw80M = %hhd\n",
- cmd->ftm_format_and_bw80M);
-
- mutex_unlock(&mvm->mutex);
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
- char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 value;
- int abort_id, ret = 0;
- char *data;
-
- mutex_lock(&mvm->mutex);
-
- data = iwl_dbgfs_is_match("abort_id=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0)
- mvm->tof_data.last_abort_id = value;
- goto out;
- }
-
- data = iwl_dbgfs_is_match("send_range_abort=", buf);
- if (data) {
- ret = kstrtou32(data, 10, &value);
- if (ret == 0 && value) {
- abort_id = mvm->tof_data.last_abort_id;
- ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
- goto out;
- }
- }
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- char buf[32];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- int last_abort_id;
-
- mutex_lock(&mvm->mutex);
- last_abort_id = mvm->tof_data.last_abort_id;
- mutex_unlock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
- last_abort_id);
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
{
- struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
- char *buf;
- int pos = 0;
- const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
- struct iwl_tof_range_rsp_ntfy *cmd;
- int i, ret;
+ u8 value;
+ int ret;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
mutex_lock(&mvm->mutex);
- cmd = &mvm->tof_data.range_resp;
-
- pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
- cmd->request_id);
- pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
- cmd->request_status);
- pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
- cmd->last_in_batch);
- pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
- cmd->num_of_aps);
- for (i = 0; i < cmd->num_of_aps; i++) {
- struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "ap %.2d: bssid=%pM status=%hhd bw=%hhd"
- " rtt=%d rtt_var=%d rtt_spread=%d"
- " rssi=%hhd rssi_spread=%hhd"
- " range=%d range_var=%d"
- " time_stamp=%d\n",
- i, ap->bssid, ap->measure_status,
- ap->measure_bw,
- ap->rtt, ap->rtt_variance, ap->rtt_spread,
- ap->rssi, ap->rssi_spread, ap->range,
- ap->range_variance, ap->timestamp);
- }
+ iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
mutex_unlock(&mvm->mutex);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
+ return count;
}
-static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
- size_t count, loff_t *ppos)
+static ssize_t
+iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
@@ -1279,13 +555,24 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
ret = kstrtou8(buf, 0, &value);
if (ret)
return ret;
- if (value > 1)
+
+ if (value > NUM_LOW_LATENCY_FORCE)
return -EINVAL;
mutex_lock(&mvm->mutex);
- iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
+ if (value == LOW_LATENCY_FORCE_UNSET) {
+ iwl_mvm_update_low_latency(mvm, vif, false,
+ LOW_LATENCY_DEBUGFS_FORCE);
+ iwl_mvm_update_low_latency(mvm, vif, false,
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
+ } else {
+ iwl_mvm_update_low_latency(mvm, vif,
+ value == LOW_LATENCY_FORCE_ON,
+ LOW_LATENCY_DEBUGFS_FORCE);
+ iwl_mvm_update_low_latency(mvm, vif, true,
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
+ }
mutex_unlock(&mvm->mutex);
-
return count;
}
@@ -1295,15 +582,25 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[30] = {};
+ char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n"
+ "dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n";
+
+ /*
+ * all values in format are boolean so the size of format is enough
+ * for holding the result string
+ */
+ char buf[sizeof(format) + 1] = {};
int len;
- len = scnprintf(buf, sizeof(buf) - 1,
- "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
+ len = scnprintf(buf, sizeof(buf) - 1, format,
!!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
!!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
!!(mvmvif->low_latency & LOW_LATENCY_VCMD),
- !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));
+ !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE),
+ !!(mvmvif->low_latency &
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE),
+ !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE),
+ !!(mvmvif->low_latency_actual));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1456,14 +753,9 @@ MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
-MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
@@ -1497,6 +789,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
@@ -1506,24 +799,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif == mvm->bf_allowed_vif)
MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
- if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
- !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
- if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
- MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
- mvmvif->dbgfs_dir, 0600);
-
- MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
- 0600);
- MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
- 0600);
- MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
- 0600);
- MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
- 0600);
- MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
- 0400);
- }
-
/*
* Create symlink for convenience pointing to interface specific
* debugfs entries for the driver. For example, under
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 52c361a6124c..e136475a34f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -69,6 +69,7 @@
#include "sta.h"
#include "iwl-io.h"
#include "debugfs.h"
+#include "iwl-modparams.h"
#include "fw/error-dump.h"
static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
@@ -1206,47 +1207,6 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-/*
- * Enable / Disable continuous recording.
- * Cause the FW to start continuous recording, by sending the relevant hcmd.
- * Enable: input of every integer larger than 0, ENABLE_CONT_RECORDING.
- * Disable: for 0 as input, DISABLE_CONT_RECORDING.
- */
-static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_trans *trans = mvm->trans;
- const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
- struct iwl_continuous_record_cmd cont_rec = {};
- int ret, rec_mode;
-
- if (!iwl_mvm_firmware_running(mvm))
- return -EIO;
-
- if (!dest)
- return -EOPNOTSUPP;
-
- if (dest->monitor_mode != SMEM_MODE ||
- trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
- return -EOPNOTSUPP;
-
- ret = kstrtoint(buf, 0, &rec_mode);
- if (ret)
- return ret;
-
- cont_rec.record_mode.enable_recording = rec_mode ?
- cpu_to_le16(ENABLE_CONT_RECORDING) :
- cpu_to_le16(DISABLE_CONT_RECORDING);
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0,
- sizeof(cont_rec), &cont_rec);
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
@@ -1722,11 +1682,33 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
+struct iwl_mvm_sniffer_apply {
+ struct iwl_mvm *mvm;
+ u16 aid;
+};
+
+static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm_sniffer_apply *apply = data;
+
+ apply->mvm->cur_aid = cpu_to_le16(apply->aid);
+
+ return true;
+}
+
static ssize_t
iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
+ struct iwl_notification_wait wait;
struct iwl_he_monitor_cmd he_mon_cmd = {};
+ struct iwl_mvm_sniffer_apply apply = {
+ .mvm = mvm,
+ };
+ u16 wait_cmds[] = {
+ iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0),
+ };
u32 aid;
int ret;
@@ -1742,10 +1724,30 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
he_mon_cmd.aid = cpu_to_le16(aid);
+ apply.aid = aid;
+
mutex_lock(&mvm->mutex);
+
+ /*
+ * Use the notification waiter to get our function triggered
+ * in sequence with other RX. This ensures that frames we get
+ * on the RX queue _before_ the new configuration is applied
+ * still have mvm->cur_aid pointing to the old AID, and that
+ * frames on the RX queue _after_ the firmware processed the
+ * new configuration (and sent the response, synchronously)
+ * get mvm->cur_aid correctly set to the new AID.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait,
+ wait_cmds, ARRAY_SIZE(wait_cmds),
+ iwl_mvm_sniffer_apply, &apply);
+
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
DATA_PATH_GROUP, 0), 0,
sizeof(he_mon_cmd), &he_mon_cmd);
+
+ /* no need to really wait, we already did anyway */
+ iwl_remove_notification(&mvm->notif_wait, &wait);
+
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -1800,7 +1802,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
-MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
@@ -2004,7 +2005,6 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
#ifdef CONFIG_ACPI
@@ -2071,6 +2071,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (!debugfs_create_blob("nvm_phy_sku", 0400,
mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
goto err;
+ if (!debugfs_create_blob("nvm_reg", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_reg_blob))
+ goto err;
debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 143c7fcaea41..e3eb812e0248 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,7 +91,7 @@
#include "fw/api/sf.h"
#include "fw/api/sta.h"
#include "fw/api/stats.h"
-#include "fw/api/tof.h"
+#include "fw/api/location.h"
#include "fw/api/tx.h"
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 0d6c313b6669..d3dc9d276e0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -293,9 +293,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
- struct iwl_mvm_alive_data alive_data;
+ struct iwl_mvm_alive_data alive_data = {};
const struct fw_img *fw;
- int ret, i;
+ int ret;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
@@ -373,9 +373,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
BIT(IWL_MAX_TID_COUNT + 2);
- for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
- atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
-
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_fw_set_dbg_rec_on(&mvm->fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 7779951a9533..7cfdd07d8736 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -97,11 +97,6 @@ struct iwl_mvm_mac_iface_iterator_data {
bool found_vif;
};
-struct iwl_mvm_hw_queues_iface_iterator_data {
- struct ieee80211_vif *exclude_vif;
- unsigned long used_hw_queues;
-};
-
static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -208,61 +203,6 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
data->preferred_tsf = NUM_TSF_IDS;
}
-/*
- * Get the mask of the queues used by the vif
- */
-u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
-{
- u32 qmask = 0, ac;
-
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
- return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
-
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
- qmask |= BIT(vif->hw_queue[ac]);
- }
-
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)
- qmask |= BIT(vif->cab_queue);
-
- return qmask;
-}
-
-static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
-
- /* exclude the given vif */
- if (vif == data->exclude_vif)
- return;
-
- data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
-}
-
-unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
- struct ieee80211_vif *exclude_vif)
-{
- struct iwl_mvm_hw_queues_iface_iterator_data data = {
- .exclude_vif = exclude_vif,
- .used_hw_queues =
- BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(mvm->aux_queue) |
- BIT(IWL_MVM_DQA_GCAST_QUEUE),
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- /* mark all VIF used hw queues */
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
- iwl_mvm_iface_hw_queues_iter, &data);
-
- return data.used_hw_queues;
-}
-
static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -360,8 +300,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
iwl_mvm_mac_iface_iterator, &data);
- used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
-
/*
* In the case we're getting here during resume, it's similar to
* firmware restart, and with RESUME_ALL the iterator will find
@@ -416,9 +354,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* the ones here - no real limit
*/
queue_limit = IEEE80211_MAX_QUEUES;
- BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
- BITS_PER_BYTE *
- sizeof(mvm->hw_queue_to_mac80211[0]));
/*
* Find available queues, and allocate them to the ACs. When in
@@ -446,9 +381,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* queue value (when queue is enabled).
*/
mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
- vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
- } else {
- vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
}
mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
@@ -462,8 +394,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
exit_fail:
memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
- memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
- vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
return ret;
}
@@ -778,27 +708,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax) {
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u8 sta_id = mvmvif->ap_sta_id;
-
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
- if (sta_id != IWL_MVM_INVALID_STA) {
- struct ieee80211_sta *sta;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /*
- * TODO: we should check the ext cap IE but it is
- * unclear why the spec requires two bits (one in HE
- * cap IE, and one in the ext cap IE). In the meantime
- * rely on the HE cap IE only.
- */
- if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] &
- IEEE80211_HE_MAC_CAP0_TWT_RES))
- ctxt_sta->data_policy |=
- cpu_to_le32(TWT_SUPPORTED);
- }
+ if (vif->bss_conf.twt_requester)
+ ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
}
@@ -881,8 +793,6 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
- cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
-
/* Override the filter flags to accept only probe requests */
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
@@ -1203,7 +1113,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
- ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+ ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
/*
* Only set the beacon time when the MAC is being added, when we
@@ -1435,7 +1345,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
IWL_DEBUG_RX(mvm,
- "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
+ "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n",
status, beacon_notify_hdr->failure_frame,
le64_to_cpu(beacon->tsf),
mvm->ap_last_beacon_gp2,
@@ -1472,35 +1382,48 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
}
}
-static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
- struct iwl_missed_beacons_notif *missed_beacons = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
struct iwl_fw_dbg_trigger_tlv *trigger;
u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
u32 rx_missed_bcon, rx_missed_bcon_since_rx;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(mb->mac_id);
- if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
- return;
+ IWL_DEBUG_INFO(mvm,
+ "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
+ le32_to_cpu(mb->mac_id),
+ le32_to_cpu(mb->consec_missed_beacons),
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
+ le32_to_cpu(mb->num_recvd_beacons),
+ le32_to_cpu(mb->num_expected_beacons));
+
+ rcu_read_lock();
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (!vif)
+ goto out;
- rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
+ rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons);
rx_missed_bcon_since_rx =
- le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx);
/*
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
*/
- if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) >
- IWL_MVM_MISSED_BEACONS_THRESHOLD)
+ if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
+ iwl_mvm_connection_loss(mvm, vif, "missed beacons");
+ else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
if (!trigger)
- return;
+ goto out;
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
@@ -1512,28 +1435,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
-}
-
-void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
-
- IWL_DEBUG_INFO(mvm,
- "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
- le32_to_cpu(mb->mac_id),
- le32_to_cpu(mb->consec_missed_beacons),
- le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
- le32_to_cpu(mb->num_recvd_beacons),
- le32_to_cpu(mb->num_expected_beacons));
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_beacon_loss_iterator,
- mb);
iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS);
+
+out:
+ rcu_read_unlock();
}
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
@@ -1575,16 +1481,29 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}
-static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
- struct iwl_probe_resp_data_notif *notif = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
struct iwl_probe_resp_data *old_data, *new_data;
+ int len = iwl_rx_packet_payload_len(pkt);
+ u32 id = le32_to_cpu(notif->mac_id);
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
- if (mvmvif->id != (u16)le32_to_cpu(notif->mac_id))
+ if (WARN_ON_ONCE(len < sizeof(*notif)))
return;
+ IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
+ notif->noa_active, notif->csa_counter);
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
+ if (!vif)
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
if (!new_data)
return;
@@ -1615,25 +1534,6 @@ static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac,
ieee80211_csa_set_counter(vif, notif->csa_counter);
}
-void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
- int len = iwl_rx_packet_payload_len(pkt);
-
- if (WARN_ON_ONCE(len < sizeof(*notif)))
- return;
-
- IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
- notif->noa_active, notif->csa_counter);
-
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_ACTIVE,
- iwl_mvm_probe_resp_data_iter,
- notif);
-}
-
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
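
The reworked missed-beacons handler applies two thresholds: once the consecutive-miss count exceeds IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG the connection is declared lost outright; otherwise, once the misses since the last RX exceed IWL_MVM_MISSED_BEACONS_THRESHOLD, only a beacon-loss event is reported to mac80211. A minimal decision sketch using the values added in this patch (8 and 16), standalone and not part of the patch:

#include <stdio.h>

#define MISSED_BEACONS_THRESHOLD	8
#define MISSED_BEACONS_THRESHOLD_LONG	16

/* Mirrors the decision order in iwl_mvm_rx_missed_beacons_notif(). */
static const char *classify(unsigned int consec_missed,
			    unsigned int missed_since_last_rx)
{
	if (consec_missed > MISSED_BEACONS_THRESHOLD_LONG)
		return "connection loss";
	if (missed_since_last_rx > MISSED_BEACONS_THRESHOLD)
		return "beacon loss";
	return "no action";
}

int main(void)
{
	printf("%s\n", classify(10, 9));	/* beacon loss */
	printf("%s\n", classify(20, 20));	/* connection loss */
	printf("%s\n", classify(5, 3));		/* no action */
	return 0;
}
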
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 97dc464379d2..c9effd7f0ef8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -395,6 +395,21 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
return ret;
}
+static const u8 he_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = he_if_types_ext_capa_sta,
+ .extended_capabilities_mask = he_if_types_ext_capa_sta,
+ .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
+ },
+};
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -410,7 +425,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SPECTRUM_MGMT);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(hw, QUEUE_CONTROL);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
@@ -424,6 +438,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+ ieee80211_hw_set(hw, STA_MMPDU_TXQ);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
if (iwl_mvm_has_tlc_offload(mvm)) {
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@@ -469,6 +487,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+ hw->max_tx_fragments = mvm->trans->max_skb_frags;
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
@@ -534,6 +553,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
hw->chanctx_data_size = sizeof(u16);
+ hw->txq_data_size = sizeof(struct iwl_mvm_txq);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -673,6 +693,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
}
+ if (mvm->nvm_data->sku_cap_11ax_enable &&
+ !iwlwifi_mod_params.disable_11ax) {
+ hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(he_iftypes_ext_capa);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@@ -776,7 +803,6 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
goto out;
__skb_queue_tail(&mvm->d0i3_tx, skb);
- ieee80211_stop_queues(mvm->hw);
/* trigger wakeup */
iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
@@ -796,13 +822,15 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_sta *sta = control->sta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
+ bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+ IEEE80211_TX_CTL_TX_OFFCHAN;
if (iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
goto drop;
}
- if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ if (offchannel &&
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
goto drop;
@@ -815,8 +843,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
sta = NULL;
/* If there is no sta, and it's not offchannel - send through AP */
- if (info->control.vif->type == NL80211_IFTYPE_STATION &&
- info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
+ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
+ !offchannel) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info->control.vif);
u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
@@ -844,22 +872,95 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
ieee80211_free_txskb(hw, skb);
}
-static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
- return false;
- return true;
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ struct sk_buff *skb = NULL;
+
+ /*
+ * No need for threads to be pending here, they can leave the first
+ * taker all the work.
+ *
+ * mvmtxq->tx_request logic:
+ *
+ * If 0, no one is currently TXing, set to 1 to indicate current thread
+ * will now start TX and other threads should quit.
+ *
+ * If 1, another thread is currently TXing, set to 2 to indicate to
+ * that thread that there was another request. Since that request may
+ * have raced with the check whether the queue is empty, the TXing
+ * thread should check the queue's status one more time before leaving.
+ * This check is done in order to not leave any TX hanging in the queue
+ * until the next TX invocation (which may not even happen).
+ *
+ * If 2, another thread is currently TXing, and it will already double
+ * check the queue, so do nothing.
+ */
+ if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
+ return;
+
+ rcu_read_lock();
+ do {
+ while (likely(!mvmtxq->stopped &&
+ (mvm->trans->system_pm_mode ==
+ IWL_PLAT_PM_MODE_DISABLED))) {
+ skb = ieee80211_tx_dequeue(hw, txq);
+
+ if (!skb)
+ break;
+
+ if (!txq->sta)
+ iwl_mvm_tx_skb_non_sta(mvm, skb);
+ else
+ iwl_mvm_tx_skb(mvm, skb, txq->sta);
+ }
+ } while (atomic_dec_return(&mvmtxq->tx_request));
+ rcu_read_unlock();
}
-static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
{
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
- return false;
- if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
- return true;
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
- /* enabled by default */
- return true;
+ /*
+ * Please note that racing is handled very carefully here:
+ * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
+ * deleted afterwards.
+ * This means that if:
+ * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
+ * queue is allocated and we can TX.
+ * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
+ * a race, should defer the frame.
+ * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
+ * need to allocate the queue and defer the frame.
+ * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
+ * queue is already scheduled for allocation, no need to allocate,
+ * should defer the frame.
+ */
+
+	/* If the queue is already allocated, TX directly and return. */
+ if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
+ /*
+ * Check that list is empty to avoid a race where txq_id is
+ * already updated, but the queue allocation work wasn't
+ * finished
+ */
+ if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
+ return;
+
+ iwl_mvm_mac_itxq_xmit(hw, txq);
+ return;
+ }
+
+ /* The list is being deleted only after the queue is fully allocated. */
+ if (!list_empty(&mvmtxq->list))
+ return;
+
+ list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+ schedule_work(&mvm->add_stream_wk);
}
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
@@ -974,7 +1075,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
}
- if (!iwl_enable_rx_ampdu(mvm->cfg)) {
+ if (!iwl_enable_rx_ampdu()) {
ret = -EINVAL;
break;
}
@@ -986,7 +1087,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
timeout);
break;
case IEEE80211_AMPDU_TX_START:
- if (!iwl_enable_tx_ampdu(mvm->cfg)) {
+ if (!iwl_enable_tx_ampdu()) {
ret = -EINVAL;
break;
}
@@ -1066,6 +1167,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_stop_device(mvm);
+ mvm->cur_aid = 0;
+
mvm->scan_status = 0;
mvm->ps_disabled = false;
mvm->calibrating = false;
@@ -1085,7 +1188,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
- memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -1391,6 +1493,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_unlock;
+ rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
+
/* Counting number of interfaces is needed for legacy PM */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count++;
@@ -1582,6 +1686,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
+ RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
+
if (vif->type == NL80211_IFTYPE_MONITOR)
mvm->monitor_on = false;
@@ -2146,6 +2252,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
}
+ /* Update MU EDCA params */
+ if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
+ bss_conf->assoc && vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
/*
* If we're not associated yet, take the (new) BSSID before associating
* so the firmware knows. If we're already associated, then use the old
@@ -2672,7 +2784,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
return;
spin_lock_bh(&mvmsta->lock);
- for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
@@ -2861,32 +2973,6 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
peer_addr, action);
}
-static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta)
-{
- struct iwl_mvm_tid_data *tid_data;
- struct sk_buff *skb;
- int i;
-
- spin_lock_bh(&mvm_sta->lock);
- for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
- tid_data = &mvm_sta->tid_data[i];
-
- while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- /*
- * The first deferred frame should've stopped the MAC
- * queues, so we should never get a second deferred
- * frame for the RA/TID.
- */
- iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
- ieee80211_free_txskb(mvm->hw, skb);
- }
- }
- spin_unlock_bh(&mvm_sta->lock);
-}
-
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2920,7 +3006,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
*/
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST) {
- iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
flush_work(&mvm->add_stream_wk);
/*
@@ -2967,6 +3052,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
NL80211_TDLS_SETUP);
}
+
+ sta->max_rc_amsdu_len = 1;
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
/*
@@ -3036,6 +3123,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
NL80211_TDLS_DISABLE_LINK);
}
+
+ /* Remove STA key if this is an AP using WEP */
+ if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
+ int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
+ mvmvif->ap_wep_key);
+
+ if (!ret)
+ ret = rm_ret;
+ }
+
} else {
ret = -EIO;
}
@@ -3431,14 +3528,20 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
- /* Set the channel info data */
- .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
- PHY_BAND_24 : PHY_BAND_5,
- .channel_info.channel = channel->hw_value,
- .channel_info.width = PHY_VHT_CHANNEL_MODE20,
- /* Set the time and duration */
- .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
- };
+ };
+ struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm,
+ &aux_roc_req.channel_info);
+ u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
+ (channel->band == NL80211_BAND_2GHZ) ?
+ PHY_BAND_24 : PHY_BAND_5,
+ PHY_VHT_CHANNEL_MODE20,
+ 0);
+
+ /* Set the time and duration */
+ tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg));
delay = AUX_ROC_MIN_DELAY;
req_dur = MSEC_TO_TU(duration);
@@ -3463,15 +3566,15 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
}
}
- aux_roc_req.duration = cpu_to_le32(req_dur);
- aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
+ tail->duration = cpu_to_le32(req_dur);
+ tail->apply_time_max_delay = cpu_to_le32(delay);
IWL_DEBUG_TE(mvm,
"ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
channel->hw_value, req_dur, duration, delay,
dtim_interval);
/* Set the node address */
- memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
+ memcpy(tail->node_addr, vif->addr, ETH_ALEN);
lockdep_assert_held(&mvm->mutex);
@@ -3502,7 +3605,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_rx_aux_roc, te_data);
- res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
+ res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len,
&aux_roc_req);
if (res) {
@@ -4656,8 +4759,35 @@ static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
mutex_unlock(&mvm->mutex);
}
+static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
+{
+ u8 protocol = ip_hdr(skb)->protocol;
+
+ if (!IS_ENABLED(CONFIG_INET))
+ return false;
+
+ return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
+}
+
+static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* For now don't aggregate IPv6 in AMSDU */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ if (!iwl_mvm_is_csum_supported(mvm))
+ return true;
+
+ return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
+}
+
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
+ .wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.start = iwl_mvm_mac_start,
.reconfig_complete = iwl_mvm_mac_reconfig_complete,
@@ -4731,6 +4861,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
#endif
.get_survey = iwl_mvm_mac_get_survey,
.sta_statistics = iwl_mvm_mac_sta_statistics,
+ .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
#endif
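
iwl_mvm_mac_itxq_xmit() above relies on a small re-entry guard: the first caller that moves tx_request from 0 to 1 becomes the drainer, later callers only bump it to 2, and the drainer keeps re-checking the queue until its decrement brings the counter back to 0. A standalone C11 sketch of the same pattern (not part of the patch; atomic_fetch_add_unless() is open-coded with a CAS loop since C11 has no direct equivalent, and the names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct toy_txq {
	atomic_int tx_request;	/* 0: idle, 1: draining, 2: drain and re-check */
	int pending;		/* frames waiting; stands in for the skb queue */
};

/* Open-coded equivalent of the kernel's atomic_fetch_add_unless(v, 1, 2). */
static int fetch_add_unless(atomic_int *v, int add, int unless)
{
	int cur = atomic_load(v);

	while (cur != unless)
		if (atomic_compare_exchange_weak(v, &cur, cur + add))
			break;
	return cur;
}

static void drain(struct toy_txq *q)
{
	while (q->pending) {
		printf("tx one frame\n");
		q->pending--;
	}
}

static void itxq_xmit(struct toy_txq *q)
{
	/*
	 * The first caller becomes the drainer; later callers only bump the
	 * counter to 2 so the drainer re-checks the queue before leaving.
	 */
	if (fetch_add_unless(&q->tx_request, 1, 2))
		return;

	do {
		drain(q);
	} while (atomic_fetch_sub(&q->tx_request, 1) != 1);
}

int main(void)
{
	struct toy_txq q;

	atomic_init(&q.tx_request, 0);
	q.pending = 3;
	itxq_xmit(&q);		/* drains all three frames */
	return 0;
}

The extra "2" state is what guarantees that a frame queued while another thread is mid-drain is never left sitting until the next unrelated TX.
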
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1aa690e081ff..12e9ecc3ee27 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -83,7 +83,6 @@
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
-#include "tof.h"
#include "fw/runtime.h"
#include "fw/dbg.h"
#include "fw/acpi.h"
@@ -95,6 +94,8 @@
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
+
/* A TimeUnit is 1024 microsecond */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
@@ -299,17 +300,38 @@ enum iwl_bt_force_ant_mode {
};
/**
+ * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs
+ * @LOW_LATENCY_FORCE_UNSET: unset force mode
+ * @LOW_LATENCY_FORCE_ON: force low latency on
+ * @LOW_LATENCY_FORCE_OFF: force low latency off
+ * @NUM_LOW_LATENCY_FORCE: number of force modes
+ */
+enum iwl_mvm_low_latency_force {
+ LOW_LATENCY_FORCE_UNSET,
+ LOW_LATENCY_FORCE_ON,
+ LOW_LATENCY_FORCE_OFF,
+ NUM_LOW_LATENCY_FORCE
+};
+
+/**
* struct iwl_mvm_low_latency_cause - low latency set causes
* @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
* @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
* @LOW_LATENCY_VCMD: low latency mode set from vendor command
* @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap)
+ * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicates that force mode is enabled;
+ *	the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE
+ * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs;
+ *	set this together with the LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag
+ *	in low_latency.
*/
enum iwl_mvm_low_latency_cause {
LOW_LATENCY_TRAFFIC = BIT(0),
LOW_LATENCY_DEBUGFS = BIT(1),
LOW_LATENCY_VCMD = BIT(2),
LOW_LATENCY_VIF_TYPE = BIT(3),
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4),
+ LOW_LATENCY_DEBUGFS_FORCE = BIT(5),
};
/**
@@ -360,8 +382,10 @@ struct iwl_probe_resp_data {
* @pm_enabled - Indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
- * @low_latency: indicates low latency is set, see
- * enum &iwl_mvm_low_latency_cause for causes.
+ * @low_latency: bit flags for low latency;
+ *	see &enum iwl_mvm_low_latency_cause for the causes.
+ * @low_latency_actual: boolean, indicates whether low latency is in effect,
+ *	derived from the low_latency bit flags with force mode taken into account.
* @ps_disabled: indicates that this interface requires PS to be disabled
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
@@ -393,7 +417,8 @@ struct iwl_mvm_vif {
bool ap_ibss_active;
bool pm_enabled;
bool monitor_active;
- u8 low_latency;
+ u8 low_latency: 6;
+ u8 low_latency_actual: 1;
bool ps_disabled;
struct iwl_mvm_vif_bf_data bf_data;
@@ -778,6 +803,39 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
+struct iwl_mvm_txq {
+ struct list_head list;
+ u16 txq_id;
+ atomic_t tx_request;
+ bool stopped;
+};
+
+static inline struct iwl_mvm_txq *
+iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
+{
+ return (void *)txq->drv_priv;
+}
+
+static inline struct iwl_mvm_txq *
+iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
+{
+ if (tid == IWL_MAX_TID_COUNT)
+ tid = IEEE80211_NUM_TIDS;
+
+ return (void *)sta->txq[tid]->drv_priv;
+}
+
+/**
+ * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
+ *
+ * @sta_id: sta id
+ * @txq_tid: txq tid
+ */
+struct iwl_mvm_tvqm_txq_info {
+ u8 sta_id;
+ u8 txq_tid;
+};
+
struct iwl_mvm_dqa_txq_info {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
@@ -843,13 +901,13 @@ struct iwl_mvm {
u64 on_time_scan;
} radio_stats, accu_radio_stats;
- u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
-
- struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
+ struct list_head add_stream_txqs;
+ union {
+ struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
+ struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
+ };
struct work_struct add_stream_wk; /* To add streams to queues */
- atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
-
const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
/* NVM sections */
@@ -863,7 +921,6 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
- unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -932,6 +989,7 @@ struct iwl_mvm {
struct debugfs_blob_wrapper nvm_calib_blob;
struct debugfs_blob_wrapper nvm_prod_blob;
struct debugfs_blob_wrapper nvm_phy_sku_blob;
+ struct debugfs_blob_wrapper nvm_reg_blob;
struct iwl_mvm_frame_stats drv_rx_stats;
spinlock_t drv_stats_lock;
@@ -955,6 +1013,7 @@ struct iwl_mvm {
u8 refs[IWL_MVM_REF_COUNT];
u8 vif_count;
+ struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
/* -1 for always, 0 for never, >0 for that many times */
s8 fw_restart;
@@ -1090,7 +1149,6 @@ struct iwl_mvm {
u32 ciphers[IWL_MVM_NUM_CIPHERS];
struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
- struct iwl_mvm_tof_data tof_data;
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
@@ -1106,6 +1164,10 @@ struct iwl_mvm {
/* does a monitor vif exist (only one can exist hence bool) */
bool monitor_on;
+
+ /* sniffer data to include in radiotap */
+ __le16 cur_aid;
+
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
@@ -1150,7 +1212,6 @@ enum iwl_mvm_init_status {
IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2),
- IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3),
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -1207,6 +1268,19 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
return iwl_mvm_sta_from_mac80211(sta);
}
+static inline struct ieee80211_vif *
+iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
+{
+ if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac)))
+ return NULL;
+
+ if (rcu)
+ return rcu_dereference(mvm->vif_id_to_mac[vif_id]);
+
+ return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id],
+ lockdep_is_held(&mvm->mutex));
+}
+
static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
{
return !iwlwifi_mod_params.d0i3_disable &&
@@ -1470,6 +1544,11 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
+void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ unsigned int tid);
+
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else
@@ -1599,7 +1678,6 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool force_assoc_off, const u8 *bssid_override);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
@@ -1615,8 +1693,6 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
-unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
- struct ieee80211_vif *exclude_vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
@@ -1870,17 +1946,43 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
* binding, so this has no real impact. For now, just return
* the current desired low-latency state.
*/
- return mvmvif->low_latency;
+ return mvmvif->low_latency_actual;
}
static inline
void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
enum iwl_mvm_low_latency_cause cause)
{
+ u8 new_state;
+
if (set)
mvmvif->low_latency |= cause;
else
mvmvif->low_latency &= ~cause;
+
+ /*
+ * if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is enabled no changes are
+ * allowed to actual mode.
+ */
+ if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE &&
+ cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE)
+ return;
+
+ if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set)
+ /*
+		 * We are entering force mode: take the state from the force bit
+ */
+ new_state = !!(mvmvif->low_latency &
+ LOW_LATENCY_DEBUGFS_FORCE);
+ else
+ /*
+ * Check if any other one set low latency
+ */
+ new_state = !!(mvmvif->low_latency &
+ ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE |
+ LOW_LATENCY_DEBUGFS_FORCE));
+
+ mvmvif->low_latency_actual = new_state;
}
/* Return a bitmask with all the hw supported queues, except for the
@@ -1895,6 +1997,16 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
+	/* If the IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set, we received
+	 * an assert. Since we failed to bring the interface up, mac80211
+	 * will not attempt to reconfigure the device; that reconfiguration
+	 * is what normally collects the dump in the assert flow, so trigger
+	 * the dump collection here.
+ */
+ if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ false, 0);
/* calling this function without using dump_start/end since at this
* point we already hold the op mode mutex
*/
@@ -1906,10 +2018,6 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
}
-/* Stop/start all mac queues in a given bitmap */
-void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
-void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
-
/* Re-configure the SCD for a queue that has already been configured */
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn);
@@ -2015,4 +2123,59 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
+/* Channel info utils */
+static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS);
+}
+
+static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci)
+{
+ return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ?
+ sizeof(struct iwl_fw_channel_info) :
+ sizeof(struct iwl_fw_channel_info_v1));
+}
+
+static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 :
+ sizeof(struct iwl_fw_channel_info) -
+ sizeof(struct iwl_fw_channel_info_v1);
+}
+
+static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci,
+ u32 chan, u8 band, u8 width,
+ u8 ctrl_pos)
+{
+ if (iwl_mvm_has_ultra_hb_channel(mvm)) {
+ ci->channel = cpu_to_le32(chan);
+ ci->band = band;
+ ci->width = width;
+ ci->ctrl_pos = ctrl_pos;
+ } else {
+ struct iwl_fw_channel_info_v1 *ci_v1 =
+ (struct iwl_fw_channel_info_v1 *)ci;
+
+ ci_v1->channel = chan;
+ ci_v1->band = band;
+ ci_v1->width = width;
+ ci_v1->ctrl_pos = ctrl_pos;
+ }
+}
+
+static inline void
+iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci,
+ struct cfg80211_chan_def *chandef)
+{
+ iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
+ (chandef->chan->band == NL80211_BAND_2GHZ ?
+ PHY_BAND_24 : PHY_BAND_5),
+ iwl_mvm_get_channel_width(chandef),
+ iwl_mvm_get_ctrl_pos(chandef));
+}
+
#endif /* __IWL_MVM_H__ */
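
The channel-info helpers added above let one command layout serve two firmware generations: the command is declared with the larger struct iwl_fw_channel_info, the tail fields are addressed through a computed offset, and the trailing padding is simply not sent when only the v1 layout is supported. A standalone sketch of the idea with made-up struct layouts (the real field sizes differ; not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two channel-info layouts; the newer one is larger (sizes are made up). */
struct chan_info_v1 { uint8_t band, channel, width, ctrl_pos; };
struct chan_info    { uint32_t channel; uint8_t band, width, ctrl_pos, reserved; };

/* Command declared with the worst-case (newer) channel_info in the middle. */
struct toy_cmd {
	uint32_t id_and_color;
	struct chan_info ci;
	uint32_t apply_time;	/* first tail field */
};

static size_t ci_padding(int has_new_ci)
{
	return has_new_ci ? 0 :
	       sizeof(struct chan_info) - sizeof(struct chan_info_v1);
}

/* The tail starts right after whichever channel_info version is in use. */
static void *cmd_tail(struct toy_cmd *cmd, int has_new_ci)
{
	return (uint8_t *)&cmd->ci +
	       (has_new_ci ? sizeof(struct chan_info) :
			     sizeof(struct chan_info_v1));
}

int main(void)
{
	struct toy_cmd cmd;
	int has_new_ci = 0;			/* firmware only knows v1 */
	uint32_t apply_time = 1234;
	size_t len = sizeof(cmd) - ci_padding(has_new_ci);

	memset(&cmd, 0, sizeof(cmd));
	/* tail fields are written through the computed offset, not the field */
	memcpy(cmd_tail(&cmd, has_new_ci), &apply_time, sizeof(apply_time));
	printf("send %zu of %zu bytes\n", len, sizeof(cmd));
	return 0;
}

This is the same shape used by iwl_mvm_send_aux_roc_cmd() and iwl_mvm_phy_ctxt_apply() in this patch: compute the tail pointer, fill it, and shorten the command length by the padding.
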
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 6fc5cc1f2b5b..7bdbd010ae6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -179,7 +179,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
ret, mvm->cfg->name);
- ret = -EIO;
+ ret = -ENODATA;
}
goto exit;
}
@@ -380,8 +380,12 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* we override the constness for initial read */
ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
size_read);
- if (ret < 0)
+ if (ret == -ENODATA) {
+ ret = 0;
continue;
+ }
+ if (ret < 0)
+ break;
size_read += ret;
temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
if (!temp) {
@@ -412,6 +416,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_phy_sku_blob.data = temp;
mvm->nvm_phy_sku_blob.size = ret;
break;
+ case NVM_SECTION_TYPE_REGULATORY_SDP:
+ case NVM_SECTION_TYPE_REGULATORY:
+ mvm->nvm_reg_blob.data = temp;
+ mvm->nvm_reg_blob.size = ret;
+ break;
default:
if (section == mvm->cfg->nvm_hw_section_num) {
mvm->nvm_hw_blob.data = temp;
@@ -454,7 +463,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
mvm->nvm_data->nvm_version);
- return 0;
+ return ret < 0 ? ret : 0;
}
struct iwl_mcc_update_resp *
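
The NVM read path now distinguishes a section that is simply absent (-ENODATA: skip it and keep going) from a genuine read failure (abort and propagate). A minimal standalone sketch of that control flow, with a stubbed read function standing in for iwl_nvm_read_section() (illustrative, not part of the patch):

#include <errno.h>
#include <stdio.h>

/* Stand-in for reading one NVM section: <0 is an error, >=0 is its size.
 * A section that simply is not present returns -ENODATA. */
static int read_section(int section)
{
	if (section == 2)
		return -ENODATA;	/* optional section, missing */
	if (section == 4)
		return -EIO;		/* real transport failure */
	return 16;			/* bytes read */
}

int main(void)
{
	int ret = 0, section;

	for (section = 0; section < 6; section++) {
		ret = read_section(section);
		if (ret == -ENODATA) {	/* skip missing sections */
			ret = 0;
			continue;
		}
		if (ret < 0)		/* abort on genuine errors */
			break;
		printf("section %d: %d bytes\n", section, ret);
	}
	return ret < 0 ? 1 : 0;
}
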
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 30c5127034a0..5e4f8b767d10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -301,8 +301,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
RX_HANDLER_SYNC),
- RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
- RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
@@ -329,8 +327,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(SCAN_REQ_UMAC),
HCMD_NAME(SCAN_ABORT_UMAC),
HCMD_NAME(SCAN_COMPLETE_UMAC),
- HCMD_NAME(TOF_CMD),
- HCMD_NAME(TOF_NOTIFICATION),
HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
HCMD_NAME(ADD_STA_KEY),
HCMD_NAME(ADD_STA),
@@ -449,6 +445,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+ HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -464,6 +461,22 @@ static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
+ HCMD_NAME(TOF_RANGE_REQ_CMD),
+ HCMD_NAME(TOF_CONFIG_CMD),
+ HCMD_NAME(TOF_RANGE_ABORT_CMD),
+ HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
+ HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
+ HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
+ HCMD_NAME(TOF_LC_NOTIF),
+ HCMD_NAME(TOF_RESPONDER_STATS),
+ HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
+ HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
HCMD_NAME(STORED_BEACON_NTF),
};
@@ -483,6 +496,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+ [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
@@ -685,6 +699,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+ INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock);
@@ -736,6 +751,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.rx_buf_size = rb_size_default;
}
+ BUILD_BUG_ON(sizeof(struct iwl_ldbg_config_cmd) !=
+ LDBG_CFG_COMMAND_SIZE);
+
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
@@ -842,8 +860,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (iwl_mvm_is_d0i3_supported(mvm))
iwl_trans_unref(mvm->trans);
- iwl_mvm_tof_init(mvm);
-
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
return op_mode;
@@ -909,8 +925,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
cancel_delayed_work_sync(&mvm->tcm.work);
- iwl_mvm_tof_clean(mvm);
-
iwl_fw_runtime_free(&mvm->fwrt);
mutex_destroy(&mvm->mutex);
mutex_destroy(&mvm->d0i3_suspend_mutex);
@@ -1079,24 +1093,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_common(mvm, rxb, pkt);
}
-void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
-{
- int q;
-
- if (WARN_ON_ONCE(!mq))
- return;
-
- for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
- if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "mac80211 %d already stopped\n", q);
- continue;
- }
-
- ieee80211_stop_queue(mvm->hw, q);
- }
-}
-
static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
const struct iwl_device_cmd *cmd)
{
@@ -1109,38 +1105,66 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
iwl_trans_block_txq_ptrs(mvm->trans, false);
}
-static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+ int hw_queue, bool start)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
+ struct ieee80211_sta *sta;
+ struct ieee80211_txq *txq;
+ struct iwl_mvm_txq *mvmtxq;
+ int i;
+ unsigned long tid_bitmap;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
- iwl_mvm_stop_mac_queues(mvm, mq);
-}
+ sta_id = iwl_mvm_has_new_tx_api(mvm) ?
+ mvm->tvqm_info[hw_queue].sta_id :
+ mvm->queue_info[hw_queue].ra_sta_id;
-void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
-{
- int q;
-
- if (WARN_ON_ONCE(!mq))
+ if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
return;
- for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
- if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "mac80211 %d still stopped\n", q);
- continue;
- }
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ goto out;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
- ieee80211_wake_queue(mvm->hw, q);
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int tid = mvm->tvqm_info[hw_queue].txq_tid;
+
+ tid_bitmap = BIT(tid);
+ } else {
+ tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
}
+
+ for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ int tid = i;
+
+ if (tid == IWL_MAX_TID_COUNT)
+ tid = IEEE80211_NUM_TIDS;
+
+ txq = sta->txq[tid];
+ mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ mvmtxq->stopped = !start;
+
+ if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+ iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ }
+
+out:
+ rcu_read_unlock();
}
-static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
+ iwl_mvm_queue_state_change(op_mode, hw_queue, false);
+}
- iwl_mvm_start_mac_queues(mvm, mq);
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ iwl_mvm_queue_state_change(op_mode, hw_queue, true);
}
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
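
iwl_mvm_queue_state_change() and iwl_mvm_txq_from_tid() both rely on the same index trick: mac80211 keeps one TXQ per QoS TID plus a final slot for management frames, so the driver's non-QoS/management TID is remapped to that last slot. A tiny standalone sketch (the constants stand in for IWL_MAX_TID_COUNT and IEEE80211_NUM_TIDS and are illustrative; not part of the patch):

#include <stdio.h>

#define MAX_DATA_TIDS		8	/* stands in for IWL_MAX_TID_COUNT */
#define NUM_MAC80211_TIDS	16	/* stands in for IEEE80211_NUM_TIDS */

/*
 * sta->txq[] has one slot per QoS TID plus a final slot for management
 * frames, so the driver's non-QoS TID maps to that last slot.
 */
static int txq_index_from_tid(int tid)
{
	return tid == MAX_DATA_TIDS ? NUM_MAC80211_TIDS : tid;
}

int main(void)
{
	printf("tid 3    -> txq[%d]\n", txq_index_from_tid(3));
	printf("mgmt tid -> txq[%d]\n", txq_index_from_tid(MAX_DATA_TIDS));
	return 0;
}
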
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 7f5434b34d0d..f369173db11c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -143,14 +143,11 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
u8 chains_static, u8 chains_dynamic)
{
u8 active_cnt, idle_cnt;
+ struct iwl_phy_context_cmd_tail *tail =
+ iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
/* Set the channel info data */
- cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
- PHY_BAND_24 : PHY_BAND_5);
-
- cmd->ci.channel = chandef->chan->hw_value;
- cmd->ci.width = iwl_mvm_get_channel_width(chandef);
- cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
/* Set rx the chains */
idle_cnt = chains_static;
@@ -168,17 +165,17 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
active_cnt = 2;
}
- cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
- cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
- cmd->rxchain_info |= cpu_to_le32(active_cnt <<
+ tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+ tail->rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (unlikely(mvm->dbgfs_rx_phyinfo))
- cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+ tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
#endif
- cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}
/*
@@ -195,6 +192,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
{
struct iwl_phy_context_cmd cmd;
int ret;
+ u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
/* Set the command header fields */
iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
@@ -203,9 +201,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
chains_static, chains_dynamic);
- ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
- sizeof(struct iwl_phy_context_cmd),
- &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd);
if (ret)
IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index dabbc04853ac..a28283ff7295 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -149,14 +149,9 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
if (he_cap && he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) {
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
- if (he_cap->he_cap_elem.phy_cap_info[3] &
- IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2)
- flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK;
- }
-
return flags;
}
@@ -320,12 +315,26 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
u16 size = le32_to_cpu(notif->amsdu_size);
+ int i;
if (WARN_ON(sta->max_amsdu_len < size))
goto out;
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
+ sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ if (mvmsta->amsdu_enabled & BIT(i))
+ sta->max_tid_amsdu_len[i] =
+ iwl_mvm_max_amsdu_size(mvm, sta, i);
+ else
+ /*
+ * Not so elegant, but this will effectively
+ * prevent AMSDU on this TID
+ */
+ sta->max_tid_amsdu_len[i] = 1;
+ }
IWL_DEBUG_RATE(mvm,
"AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 089972280daa..09866f50857a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1744,6 +1744,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum rs_action scale_action)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int i;
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
@@ -1757,6 +1758,19 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvmsta->amsdu_enabled = 0xFFFF;
mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ if (mvmsta->amsdu_enabled)
+ sta->max_tid_amsdu_len[i] =
+ iwl_mvm_max_amsdu_size(mvm, sta, i);
+ else
+ /*
+ * Not so elegant, but this will effectively
+ * prevent AMSDU on this TID
+ */
+ sta->max_tid_amsdu_len[i] = 1;
+ }
}
/*
@@ -3332,12 +3346,12 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
/* Building the rate table is non trivial. When we're in MIMO2/VHT/80Mhz/SGI
* column the rate table should look like this:
*
- * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
- * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
- * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
- * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
- * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
- * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[0] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[1] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[2] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[3] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[4] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[5] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
* rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
* rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 6653a238f32e..235ab26ca429 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -599,8 +599,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
- if (iwl_mvm_is_cdb_supported(mvm)) {
- struct mvm_statistics_general_cdb *general =
+ if (iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_general *general =
data->general;
mvmvif->beacon_stats.num_beacons =
@@ -723,7 +723,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else
expected_size = sizeof(struct iwl_notif_statistics_v10);
} else {
- expected_size = sizeof(struct iwl_notif_statistics_cdb);
+ expected_size = sizeof(struct iwl_notif_statistics);
}
if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
@@ -753,7 +753,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
flags = stats->flag;
} else {
- struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
+ struct iwl_notif_statistics *stats = (void *)&pkt->data;
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
@@ -792,7 +792,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
- struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
+ struct iwl_notif_statistics *stats = (void *)&pkt->data;
energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 7bd8676508f5..2c56f73d688e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -192,27 +192,48 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
}
}
+static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_vendor_radiotap *radiotap;
+ int size = sizeof(*radiotap) + sizeof(__le16);
+
+ if (!mvm->cur_aid)
+ return;
+
+ radiotap = skb_put(skb, size);
+ radiotap->align = 1;
+ /* Intel OUI */
+ radiotap->oui[0] = 0xf6;
+ radiotap->oui[1] = 0x54;
+ radiotap->oui[2] = 0x25;
+ /* radiotap sniffer config sub-namespace */
+ radiotap->subns = 1;
+ radiotap->present = 0x1;
+ radiotap->len = size - sizeof(*radiotap);
+ radiotap->pad = 0;
+
+ /* fill the data now */
+ memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid));
+
+ rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
+}
+
/* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ bool csi)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
if (!(rx_status->flag & RX_FLAG_NO_PSDU) &&
- iwl_mvm_check_pn(mvm, skb, queue, sta)) {
+ iwl_mvm_check_pn(mvm, skb, queue, sta))
kfree_skb(skb);
- } else {
- unsigned int radiotap_len = 0;
-
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
- radiotap_len += sizeof(struct ieee80211_radiotap_he);
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
- radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
- __skb_push(skb, radiotap_len);
+ else
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
- }
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
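The new sniffer-config helper appends a vendor radiotap element carrying the current AID in a two-byte payload, tagged with the OUI bytes shown in the patch. The struct below is a simplified stand-in for mac80211's ieee80211_vendor_radiotap used only to show how the element is populated; it is not the real layout.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct vendor_rtap {
	uint8_t align;		/* alignment of the data area */
	uint8_t oui[3];		/* vendor OUI */
	uint8_t subns;		/* vendor sub-namespace */
	uint32_t present;	/* vendor-defined presence bitmap */
	uint16_t len;		/* length of data[] */
	uint8_t pad;
	uint8_t data[2];	/* here: the AID, little endian */
};

int main(void)
{
	uint16_t cur_aid = 0x0005;
	struct vendor_rtap r = {
		.align = 1,
		.oui = { 0xf6, 0x54, 0x25 },	/* OUI bytes as in the patch */
		.subns = 1,
		.present = 0x1,
		.len = sizeof(r.data),
	};

	memcpy(r.data, &cur_aid, sizeof(cur_aid));
	printf("vendor element: %d data bytes, subns %d\n", r.len, r.subns);
	return 0;
}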
@@ -473,7 +494,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
- sta);
+ sta, false);
reorder_buf->num_stored--;
}
}
@@ -666,6 +687,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* This also covers the case of receiving a Block Ack Request
* outside a BA session; we'll pass it to mac80211 and that
* then sends a delBA action frame.
+ * This also covers pure monitor mode, in which case we won't
+ * have any BA sessions.
*/
if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
return false;
@@ -1158,14 +1181,12 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
/* temporarily hide the radiotap data */
__skb_pull(skb, radiotap_len);
- if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_SU) {
- /* report the AMPDU-EOF bit on single frames */
- if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
- rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
- if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
- }
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
@@ -1178,9 +1199,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
/* toggle is switched whenever new aggregation starts */
- if (toggle_bit != mvm->ampdu_toggle &&
- (he_type == RATE_MCS_HE_TYPE_MU ||
- he_type == RATE_MCS_HE_TYPE_SU)) {
+ if (toggle_bit != mvm->ampdu_toggle) {
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
@@ -1314,6 +1333,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
.d4 = desc->phy_data4,
.info_type = IWL_RX_PHY_INFO_TYPE_NONE,
};
+ bool csi = false;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@@ -1412,7 +1432,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* set the preamble flag if appropriate */
- if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ if (rate_n_flags & RATE_MCS_CCK_MSK &&
+ phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
@@ -1441,14 +1462,23 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
- rx_status->ampdu_reference = mvm->ampdu_ref;
- /* toggle is switched whenever new aggregation starts */
+ /*
+ * Toggle is switched whenever new aggregation starts. Make
+ * sure ampdu_reference is never 0 so we can later use it to
+ * see if the frame was really part of an A-MPDU or not.
+ */
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
+ if (mvm->ampdu_ref == 0)
+ mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
}
+ rx_status->ampdu_reference = mvm->ampdu_ref;
}
+ if (unlikely(mvm->monitor_on))
+ iwl_mvm_add_rtap_sniffer_config(mvm, skb);
+
rcu_read_lock();
if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
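The A-MPDU reference handling above bumps the reference whenever the PHY toggle bit flips (a new aggregation started) and keeps it from ever being 0, so that a zero reference can later mean "not part of an A-MPDU". A self-contained sketch of that counter logic:

#include <stdbool.h>
#include <stdio.h>

static unsigned int ampdu_ref;
static bool ampdu_toggle;

static unsigned int rx_frame(bool toggle_bit)
{
	/* toggle flips on every new aggregation */
	if (toggle_bit != ampdu_toggle) {
		ampdu_ref++;
		if (ampdu_ref == 0)	/* skip 0 on wrap-around */
			ampdu_ref++;
		ampdu_toggle = toggle_bit;
	}
	return ampdu_ref;
}

int main(void)
{
	/* two frames of one aggregation, then a new aggregation */
	printf("%u %u %u\n", rx_frame(true), rx_frame(true), rx_frame(false));
	return 0;
}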
@@ -1602,7 +1632,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
+ sta, csi);
out:
rcu_read_unlock();
}
@@ -1705,15 +1736,24 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->nss =
- ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
- RATE_VHT_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status->encoding = RX_ENC_VHT;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
- } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
+ /*
+ * take the nss from the rx_vec since the rate_n_flags has
+ * only 2 bits for the nss which gives a max of 4 ss but
+ * there may be up to 8 spatial streams
+ */
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
+ } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
+ } else {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
@@ -1726,7 +1766,7 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->rate_idx = rate;
}
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, false);
out:
rcu_read_unlock();
}
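The monitor-NDP path takes the NSS from the rx_vec words because rate_n_flags only carries two NSS bits (at most 4 spatial streams) while the rx_vec field is wide enough for up to 8. The helper below mimics le32_get_bits() for a hypothetical 3-bit field; the real mask values live in the firmware API headers.

#include <stdint.h>
#include <stdio.h>

static uint32_t get_bits(uint32_t word, uint32_t mask)
{
	/* extract the field selected by a contiguous mask */
	return (word & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t rx_vec0 = 0x00000128;		/* example register value */
	uint32_t vht_nsts_mask = 0x00000038;	/* hypothetical 3-bit field */
	unsigned int nss = get_bits(rx_vec0, vht_nsts_mask) + 1;

	printf("nss = %u\n", nss);
	return 0;
}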
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index d1d76bb9a750..9da0dae78510 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,7 +66,7 @@ struct iwl_mvm_active_iface_iterator_data {
struct ieee80211_vif *ignore_vif;
u8 sta_vif_ap_sta_id;
enum iwl_sf_state sta_vif_state;
- int num_active_macs;
+ u32 num_active_macs;
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index e28009832da0..c5a01470a3bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -356,24 +356,16 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
return ret;
}
-static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u8 tid, u8 flags)
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int queue, u8 tid, u8 flags)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
- bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
int ret;
- if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
- return -EINVAL;
-
if (iwl_mvm_has_new_tx_api(mvm)) {
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
-
iwl_trans_txq_free(mvm->trans, queue);
return 0;
@@ -384,36 +376,15 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
- /*
- * If there is another TID with the same AC - don't remove the MAC queue
- * from the mapping
- */
- if (tid < IWL_MAX_TID_COUNT) {
- unsigned long tid_bitmap =
- mvm->queue_info[queue].tid_bitmap;
- int ac = tid_to_mac80211_ac[tid];
- int i;
-
- for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
- if (tid_to_mac80211_ac[i] == ac)
- remove_mac_queue = false;
- }
- }
-
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
-
cmd.action = mvm->queue_info[queue].tid_bitmap ?
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
if (cmd.action == SCD_CFG_DISABLE_QUEUE)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
- "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+ "Disabling TXQ #%d tids=0x%x\n",
queue,
- mvm->queue_info[queue].tid_bitmap,
- mvm->hw_queue_to_mac80211[queue]);
+ mvm->queue_info[queue].tid_bitmap);
/* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE)
@@ -423,15 +394,19 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
cmd.tid = mvm->queue_info[queue].txq_tid;
/* Make sure queue info is correct even though we overwrite it */
- WARN(mvm->queue_info[queue].tid_bitmap ||
- mvm->hw_queue_to_mac80211[queue],
- "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
- queue, mvm->hw_queue_to_mac80211[queue],
- mvm->queue_info[queue].tid_bitmap);
+ WARN(mvm->queue_info[queue].tid_bitmap,
+ "TXQ #%d info out-of-sync - tids=0x%x\n",
+ queue, mvm->queue_info[queue].tid_bitmap);
/* If we are here - the queue is freed and we can zero out these vals */
mvm->queue_info[queue].tid_bitmap = 0;
- mvm->hw_queue_to_mac80211[queue] = 0;
+
+ if (sta) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ }
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
@@ -517,9 +492,14 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
spin_lock_bh(&mvmsta->lock);
/* Unmap MAC queues and TIDs from this queue */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
disable_agg_tids |= BIT(tid);
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -541,10 +521,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
+ struct ieee80211_sta *old_sta,
u8 new_sta_id)
{
struct iwl_mvm_sta *mvmsta;
- u8 txq_curr_ac, sta_id, tid;
+ u8 sta_id, tid;
unsigned long disable_agg_tids = 0;
bool same_sta;
int ret;
@@ -554,7 +535,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
- txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
tid = mvm->queue_info[queue].txq_tid;
@@ -570,9 +550,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);
- ret = iwl_mvm_disable_txq(mvm, queue,
- mvmsta->vif->hw_queue[txq_curr_ac],
- tid, 0);
+ ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
@@ -662,16 +640,15 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* in such a case, otherwise - if no redirection required - it does nothing,
* unless the %force param is true.
*/
-static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force)
+static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force, struct iwl_mvm_txq *txq)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool shared_queue;
- unsigned long mq;
int ret;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -695,14 +672,14 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
- mq = mvm->hw_queue_to_mac80211[queue];
shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]);
- /* Stop MAC queues and wait for this queue to empty */
- iwl_mvm_stop_mac_queues(mvm, mq);
+ /* Stop the queue and wait for it to empty */
+ txq->stopped = true;
+
ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
if (ret) {
IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
@@ -743,8 +720,8 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
out:
- /* Continue using the MAC queues */
- iwl_mvm_start_mac_queues(mvm, mq);
+ /* Continue using the queue */
+ txq->stopped = false;
return ret;
}
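The redirect path now quiesces only the one mac80211 TXQ being moved (via its stopped flag) instead of a whole MAC-queue bitmap, drains the hardware queue, reconfigures it, and then lets the TXQ run again regardless of the outcome. A schematic sketch of that stop/drain/resume ordering with placeholder helpers, not the driver's real functions:

#include <stdbool.h>
#include <stdio.h>

struct txq { bool stopped; };

static int wait_queue_empty(int hw_queue)
{
	/* stand-in for the transport's wait-for-empty call */
	printf("draining hw queue %d\n", hw_queue);
	return 0;
}

static int redirect_queue(struct txq *txq, int hw_queue, int new_fifo)
{
	int ret;

	txq->stopped = true;		/* stop feeding this TXQ */

	ret = wait_queue_empty(hw_queue);
	if (ret)
		goto out;

	printf("re-enable queue %d on fifo %d\n", hw_queue, new_fifo);
out:
	txq->stopped = false;		/* resume in all cases */
	return ret;
}

int main(void)
{
	struct txq txq = { .stopped = false };

	return redirect_queue(&txq, 9, 2);
}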
@@ -769,7 +746,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
return -ENOSPC;
}
-static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
int queue, size = IWL_DEFAULT_QUEUE_SIZE;
@@ -792,10 +769,7 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d (mac80211 map:0x%x)\n",
- queue, mvm->hw_queue_to_mac80211[queue]);
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
return queue;
}
@@ -805,9 +779,10 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
- u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
lockdep_assert_held(&mvm->mutex);
@@ -815,11 +790,16 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Allocating queue for sta %d on tid %d\n",
mvmsta->sta_id, tid);
- queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
- wdg_timeout);
+ queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
if (queue < 0)
return queue;
+ if (sta) {
+ mvmtxq->txq_id = queue;
+ mvm->tvqm_info[queue].txq_tid = tid;
+ mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
+ }
+
IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
spin_lock_bh(&mvmsta->lock);
@@ -829,8 +809,9 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
return 0;
}
-static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u8 sta_id, u8 tid)
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ int queue, u8 sta_id, u8 tid)
{
bool enable_queue = true;
@@ -845,14 +826,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
if (mvm->queue_info[queue].tid_bitmap)
enable_queue = false;
- if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
- WARN(mac80211_queue >=
- BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
- "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
- mac80211_queue, queue, sta_id, tid);
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- }
-
mvm->queue_info[queue].tid_bitmap |= BIT(tid);
mvm->queue_info[queue].ra_sta_id = sta_id;
@@ -866,16 +839,22 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
mvm->queue_info[queue].txq_tid = tid;
}
+ if (sta) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
+ mvmtxq->txq_id = queue;
+ }
+
IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
- queue, mvm->queue_info[queue].tid_bitmap,
- mvm->hw_queue_to_mac80211[queue]);
+ "Enabling TXQ #%d tids=0x%x\n",
+ queue, mvm->queue_info[queue].tid_bitmap);
return enable_queue;
}
-static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u16 ssn,
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int queue, u16 ssn,
const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
@@ -895,8 +874,7 @@ static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
return false;
/* Send the enabling command if we need to */
- if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
- cfg->sta_id, cfg->tid))
+ if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
return false;
inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
@@ -989,9 +967,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
- tid_to_mac80211_ac[tid], ssn,
- wdg_timeout, true);
+ ret = iwl_mvm_redirect_queue(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true,
+ iwl_mvm_txq_from_tid(sta, tid));
if (ret) {
IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
return;
@@ -1068,11 +1047,9 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
u16 tid_bitmap;
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
- mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -1105,10 +1082,6 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* sure all TIDs have existing corresponding mac queues enabled
*/
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- mvm->hw_queue_to_mac80211[queue] |=
- BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
- }
/* If the queue is marked as shared - "unshare" it */
if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
@@ -1136,6 +1109,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
unsigned long unshare_queues = 0;
unsigned long changetid_queues = 0;
int i, ret, free_queue = -ENOSPC;
+ struct ieee80211_sta *queue_owner = NULL;
lockdep_assert_held(&mvm->mutex);
@@ -1201,13 +1175,14 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
inactive_tid_bitmap,
&unshare_queues,
&changetid_queues);
- if (ret >= 0 && free_queue < 0)
+ if (ret >= 0 && free_queue < 0) {
+ queue_owner = sta;
free_queue = ret;
+ }
/* only unlock sta lock - we still need the queue info lock */
spin_unlock_bh(&mvmsta->lock);
}
- rcu_read_unlock();
/* Reconfigure queues requiring reconfiguration */
for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
@@ -1216,18 +1191,21 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
iwl_mvm_change_queue_tid(mvm, i);
if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
- ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
+ ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
alloc_for_sta);
- if (ret)
+ if (ret) {
+ rcu_read_unlock();
return ret;
+ }
}
+ rcu_read_unlock();
+
return free_queue;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta, u8 ac, int tid,
- struct ieee80211_hdr *hdr)
+ struct ieee80211_sta *sta, u8 ac, int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_trans_txq_scd_cfg cfg = {
@@ -1238,7 +1216,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
};
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
- u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
@@ -1257,12 +1234,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
spin_unlock_bh(&mvmsta->lock);
- /*
- * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
- * exists
- */
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ if (tid == IWL_MAX_TID_COUNT) {
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_MGMT_QUEUE,
IWL_MVM_DQA_MAX_MGMT_QUEUE);
@@ -1341,8 +1313,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
}
- inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
- ssn, &cfg, wdg_timeout);
+ inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
/*
* Mark queue as shared in transport if shared
@@ -1384,8 +1355,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
} else {
/* Redirect queue, if needed */
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
- wdg_timeout, false);
+ ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
+ wdg_timeout, false,
+ iwl_mvm_txq_from_tid(sta, tid));
if (ret)
goto out_err;
}
@@ -1393,7 +1365,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
return 0;
out_err:
- iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
+ iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
return ret;
}
@@ -1406,87 +1378,32 @@ static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
return tid_to_mac80211_ac[tid];
}
-static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta, int tid)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- struct sk_buff_head deferred_tx;
- u8 mac_queue;
- bool no_queue = false; /* Marks if there is a problem with the queue */
- u8 ac;
-
- lockdep_assert_held(&mvm->mutex);
-
- skb = skb_peek(&tid_data->deferred_tx_frames);
- if (!skb)
- return;
- hdr = (void *)skb->data;
-
- ac = iwl_mvm_tid_to_ac_queue(tid);
- mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
-
- if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
- iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
- IWL_ERR(mvm,
- "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
- mvmsta->sta_id, tid);
-
- /*
- * Mark queue as problematic so later the deferred traffic is
- * freed, as we can do nothing with it
- */
- no_queue = true;
- }
-
- __skb_queue_head_init(&deferred_tx);
-
- /* Disable bottom-halves when entering TX path */
- local_bh_disable();
- spin_lock(&mvmsta->lock);
- skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
- mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
- spin_unlock(&mvmsta->lock);
-
- while ((skb = __skb_dequeue(&deferred_tx)))
- if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
- ieee80211_free_txskb(mvm->hw, skb);
- local_bh_enable();
-
- /* Wake queue */
- iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
-}
-
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
add_stream_wk);
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- unsigned long deferred_tid_traffic;
- int sta_id, tid;
mutex_lock(&mvm->mutex);
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
- /* Go over all stations with deferred traffic */
- for_each_set_bit(sta_id, mvm->sta_deferred_frames,
- IWL_MVM_STATION_COUNT) {
- clear_bit(sta_id, mvm->sta_deferred_frames);
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
- continue;
+ while (!list_empty(&mvm->add_stream_txqs)) {
+ struct iwl_mvm_txq *mvmtxq;
+ struct ieee80211_txq *txq;
+ u8 tid;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
- deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+ mvmtxq = list_first_entry(&mvm->add_stream_txqs,
+ struct iwl_mvm_txq, list);
+
+ txq = container_of((void *)mvmtxq, struct ieee80211_txq,
+ drv_priv);
+ tid = txq->tid;
+ if (tid == IEEE80211_NUM_TIDS)
+ tid = IWL_MAX_TID_COUNT;
- for_each_set_bit(tid, &deferred_tid_traffic,
- IWL_MAX_TID_COUNT + 1)
- iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+ iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
+ list_del_init(&mvmtxq->list);
+ iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
}
mutex_unlock(&mvm->mutex);
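The reworked add-stream worker drains a list of driver-private TXQ nodes instead of walking deferred-frame bitmaps: each node is embedded in a mac80211 TXQ object, so container_of()-style arithmetic recovers the outer object, and mac80211's management TID is remapped to the driver's own management slot before the queue is allocated. All structures and constants below are stand-ins for illustration.

#include <stddef.h>
#include <stdio.h>

#define NUM_TIDS	16	/* stand-in for IEEE80211_NUM_TIDS */
#define MAX_TID_COUNT	8	/* stand-in for IWL_MAX_TID_COUNT */

struct mvm_txq { struct mvm_txq *next; };
struct mac_txq { int tid; struct mvm_txq drv_priv; };

/* recover the outer TXQ from its embedded driver-private node */
#define outer_txq(p) \
	((struct mac_txq *)((char *)(p) - offsetof(struct mac_txq, drv_priv)))

int main(void)
{
	struct mac_txq txq = { .tid = NUM_TIDS };	/* queued mgmt TXQ */
	struct mvm_txq *head = &txq.drv_priv;

	while (head) {
		struct mvm_txq *node = head;
		int tid = outer_txq(node)->tid;

		if (tid == NUM_TIDS)
			tid = MAX_TID_COUNT;	/* driver's mgmt TID slot */

		printf("allocate queue for tid %d, then kick TX\n", tid);
		head = node->next;	/* list_del + advance */
	}
	return 0;
}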
@@ -1542,10 +1459,11 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
* Note that re-enabling aggregations isn't done in this function.
*/
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta)
+ struct ieee80211_sta *sta)
{
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned int wdg =
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
int i;
struct iwl_trans_txq_scd_cfg cfg = {
.sta_id = mvm_sta->sta_id,
@@ -1561,23 +1479,18 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
int txq_id = tid_data->txq_id;
int ac;
- u8 mac_queue;
if (txq_id == IWL_MVM_INVALID_QUEUE)
continue;
- skb_queue_head_init(&tid_data->deferred_tx_frames);
-
ac = tid_to_mac80211_ac[i];
- mac_queue = mvm_sta->vif->hw_queue[ac];
if (iwl_mvm_has_new_tx_api(mvm)) {
IWL_DEBUG_TX_QUEUES(mvm,
"Re-mapping sta %d tid %d\n",
mvm_sta->sta_id, i);
- txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
- mvm_sta->sta_id,
- i, wdg_timeout);
+ txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
+ i, wdg);
tid_data->txq_id = txq_id;
/*
@@ -1600,8 +1513,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
"Re-mapping sta %d tid %d to queue %d\n",
mvm_sta->sta_id, i, txq_id);
- iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
- wdg_timeout);
+ iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
}
}
@@ -1691,7 +1603,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (ret)
goto err;
- iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
+ iwl_mvm_realloc_queues_after_restart(mvm, sta);
sta_update = true;
sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
goto update_fw;
@@ -1724,9 +1636,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
* frames until the queue is allocated
*/
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
- skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
}
- mvm_sta->deferred_traffic_tid_map = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ INIT_LIST_HEAD(&mvmtxq->list);
+ atomic_set(&mvmtxq->tx_request, 0);
+ }
+
mvm_sta->agg_tids = 0;
if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -1861,9 +1781,9 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mvm_sta *mvm_sta)
+ struct ieee80211_sta *sta)
{
- int ac;
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
int i;
lockdep_assert_held(&mvm->mutex);
@@ -1872,11 +1792,17 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;
- ac = iwl_mvm_tid_to_ac_queue(i);
- iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
- vif->hw_queue[ac], i, 0);
+ iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
+ 0);
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ }
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
@@ -1938,7 +1864,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
- iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+ iwl_mvm_disable_sta_queues(mvm, vif, sta);
/* If there is a TXQ still marked as reserved - free it */
if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
@@ -2044,7 +1970,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
if (iwl_mvm_has_new_tx_api(mvm)) {
int tvqm_queue =
- iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
+ iwl_mvm_tvqm_enable_txq(mvm, sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
*queue = tvqm_queue;
@@ -2057,7 +1983,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
.frame_limit = IWL_FRAME_LIMIT,
};
- iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
+ iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
}
}
@@ -2135,8 +2061,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
- IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2195,8 +2120,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
bsta->tfd_queue_msk |= BIT(queue);
- iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
- &cfg, wdg_timeout);
+ iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}
if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -2215,8 +2139,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* to firmware so enable queue here - after the station was added
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
- queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
- bsta->sta_id,
+ queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
IWL_MAX_TID_COUNT,
wdg_timeout);
@@ -2254,7 +2177,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
return;
}
- iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
+ iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
if (iwl_mvm_has_new_tx_api(mvm))
return;
@@ -2377,10 +2300,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* Note that this is done here as we want to avoid making DQA
* changes in mac80211 layer.
*/
- if (vif->type == NL80211_IFTYPE_ADHOC) {
- vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
- mvmvif->cab_queue = vif->cab_queue;
- }
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
/*
* While in previous FWs we had to exclude cab queue from TFD queue
@@ -2388,9 +2309,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
*/
if (!iwl_mvm_has_new_tx_api(mvm) &&
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
- &cfg, timeout);
- msta->tfd_queue_msk |= BIT(vif->cab_queue);
+ iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
+ timeout);
+ msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
}
ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
mvmvif->id, mvmvif->color);
@@ -2407,15 +2328,14 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* tfd_queue_mask.
*/
if (iwl_mvm_has_new_tx_api(mvm)) {
- int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
- msta->sta_id,
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
0,
timeout);
mvmvif->cab_queue = queue;
} else if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
- iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
- &cfg, timeout);
+ iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
+ timeout);
if (mvmvif->ap_wep_key) {
u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
@@ -2446,8 +2366,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
- iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
- 0, 0);
+ iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@@ -2781,7 +2700,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data;
u16 normalized_ssn;
- int txq_id;
+ u16 txq_id;
int ret;
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
@@ -2823,17 +2742,24 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
txq_id = mvmsta->tid_data[tid].txq_id;
if (txq_id == IWL_MVM_INVALID_QUEUE) {
- txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
- IWL_MVM_DQA_MIN_DATA_QUEUE,
- IWL_MVM_DQA_MAX_DATA_QUEUE);
- if (txq_id < 0) {
- ret = txq_id;
+ ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (ret < 0) {
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto out;
}
+ txq_id = ret;
+
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+ } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
+ ret = -ENXIO;
+ IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
+ tid, IWL_MAX_HW_QUEUES - 1);
+ goto out;
+
} else if (unlikely(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_SHARED)) {
ret = -ENXIO;
@@ -2976,8 +2902,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
if (alloc_queue)
- iwl_mvm_enable_txq(mvm, queue,
- vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
+ iwl_mvm_enable_txq(mvm, sta, queue, ssn,
&cfg, wdg_timeout);
/* Send ADD_STA command to enable aggs only if the queue isn't shared */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index d52cd888f77d..0614296244b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -297,7 +297,6 @@ enum iwl_mvm_agg_state {
/**
* struct iwl_mvm_tid_data - holds the states for each RA / TID
- * @deferred_tx_frames: deferred TX frames for this RA/TID
* @seq_number: the next WiFi sequence number to use
* @next_reclaimed: the WiFi sequence number of the next packet to be acked.
* This is basically (last acked packet++).
@@ -318,7 +317,6 @@ enum iwl_mvm_agg_state {
* tpt_meas_start
*/
struct iwl_mvm_tid_data {
- struct sk_buff_head deferred_tx_frames;
u16 seq_number;
u16 next_reclaimed;
/* The rest is Tx AGG related */
@@ -427,8 +425,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
- u16 deferred_traffic_tid_map;
-
u8 reserved_queue;
/* Temporary, until the new TLC will control the Tx protection */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index e02f4eb20359..859aa5a4e6b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -399,6 +399,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct iwl_tdls_channel_switch_cmd cmd = {0};
+ struct iwl_tdls_channel_switch_cmd_tail *tail =
+ iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
+ u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -414,9 +417,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
}
cmd.switch_type = type;
- cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
- cmd.timing.switch_time = cpu_to_le32(switch_time);
- cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
+ tail->timing.frame_timestamp = cpu_to_le32(timestamp);
+ tail->timing.switch_time = cpu_to_le32(switch_time);
+ tail->timing.switch_timeout = cpu_to_le32(switch_timeout);
rcu_read_lock();
sta = ieee80211_find_sta(vif, peer);
@@ -448,21 +451,16 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
}
}
- if (chandef) {
- cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
- PHY_BAND_24 : PHY_BAND_5);
- cmd.ci.channel = chandef->chan->hw_value;
- cmd.ci.width = iwl_mvm_get_channel_width(chandef);
- cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
- }
+ if (chandef)
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);
/* keep quota calculation simple for now - 50% of DTIM for TDLS */
- cmd.timing.max_offchan_duration =
+ tail->timing.max_offchan_duration =
cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
vif->bss_conf.beacon_int) / 2);
/* Switch time is the first element in the switch-timing IE. */
- cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
+ tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
info = IEEE80211_SKB_CB(skb);
hdr = (void *)skb->data;
@@ -472,20 +470,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
ret = -EINVAL;
goto out;
}
- iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+ iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
}
- iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
+ iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
mvmsta->sta_id);
- iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
+ iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
hdr->frame_control);
rcu_read_unlock();
- memcpy(cmd.frame.data, skb->data, skb->len);
+ memcpy(tail->frame.data, skb->data, skb->len);
- ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index e1a6f4e22253..5b34100e9099 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -334,6 +334,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
+ set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
@@ -686,6 +687,8 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data)
{
struct iwl_hs20_roc_req aux_cmd = {};
+ u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
+
u32 uid;
int ret;
@@ -699,7 +702,7 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
le32_to_cpu(aux_cmd.event_unique_id));
ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
- sizeof(aux_cmd), &aux_cmd);
+ len, &aux_cmd);
if (WARN_ON(ret))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
deleted file mode 100644
index 01e0a999063b..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include "mvm.h"
-#include "fw/api/tof.h"
-
-#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
-
-void iwl_mvm_tof_init(struct iwl_mvm *mvm)
-{
- struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return;
-
- memset(tof_data, 0, sizeof(*tof_data));
-
- tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (IWL_MVM_TOF_IS_RESPONDER) {
- tof_data->responder_cfg.sub_grp_cmd_id =
- cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
- tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
- }
-#endif
-
- tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
- tof_data->range_req.req_timeout = 1;
- tof_data->range_req.initiator = 1;
- tof_data->range_req.report_policy = 3;
-
- tof_data->range_req_ext.sub_grp_cmd_id =
- cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
-
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
- mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
-}
-
-void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
-{
- struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
-
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
- !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE))
- return;
-
- memset(tof_data, 0, sizeof(*tof_data));
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
- mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
-}
-
-static void iwl_tof_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- bool *enabled = _data;
-
- /* non bss vif exists */
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
- *enabled = false;
-}
-
-int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
-{
- struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
- bool enabled;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_tof_iterator, &enabled);
- if (!enabled) {
- IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
- return -EINVAL;
- }
-
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(*cmd), cmd);
-}
-
-int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
-{
- struct iwl_tof_range_abort_cmd cmd = {
- .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
- .request_id = id,
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- if (id != mvm->tof_data.active_range_request) {
- IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
- id, mvm->tof_data.active_range_request);
- return -EINVAL;
- }
-
- /* after abort is sent there's no active request anymore */
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
-
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(cmd), &cmd);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
- !mvmvif->ap_ibss_active) {
- IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
- return -EIO;
- }
-
- cmd->sta_id = mvmvif->bcast_sta.sta_id;
- memcpy(cmd->bssid, vif->addr, ETH_ALEN);
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(*cmd), cmd);
-}
-#endif
-
-int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
- .len = { sizeof(mvm->tof_data.range_req), },
- /* no copy because of the command size */
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
- IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
- return -EIO;
- }
-
- /* nesting of range requests is not supported in FW */
- if (mvm->tof_data.active_range_request !=
- IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
- IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
- mvm->tof_data.active_range_request);
- return -EIO;
- }
-
- mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
-
- cmd.data[0] = &mvm->tof_data.range_req;
- return iwl_mvm_send_cmd(mvm, &cmd);
-}
-
-int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- lockdep_assert_held(&mvm->mutex);
-
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
- return -EINVAL;
-
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
- IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
- return -EIO;
- }
-
- return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
- IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(mvm->tof_data.range_req_ext),
- &mvm->tof_data.range_req_ext);
-}
-
-static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
-{
- struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
-
- if (resp->request_id != mvm->tof_data.active_range_request) {
- IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
- resp->request_id, mvm->tof_data.active_range_request);
- return -EIO;
- }
-
- memcpy(&mvm->tof_data.range_resp, resp,
- sizeof(struct iwl_tof_range_rsp_ntfy));
- mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
-
- return 0;
-}
-
-static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
-{
- struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
-
- IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
- return 0;
-}
-
-static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
-{
- struct iwl_tof_neighbor_report *report =
- (struct iwl_tof_neighbor_report *)data;
-
- IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
- report->bssid, report->request_token, report->status);
- return 0;
-}
-
-void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
-
- lockdep_assert_held(&mvm->mutex);
-
- switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
- case TOF_RANGE_RESPONSE_NOTIF:
- iwl_mvm_tof_range_resp(mvm, resp->data);
- break;
- case TOF_MCSI_DEBUG_NOTIF:
- iwl_mvm_tof_mcsi_notif(mvm, resp->data);
- break;
- case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
- iwl_mvm_tof_nb_report_notif(mvm, resp->data);
- break;
- default:
- IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
- resp->sub_grp_cmd_id);
- break;
- }
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
deleted file mode 100644
index 8138d0606c52..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __tof_h__
-#define __tof_h__
-
-#include "fw/api/tof.h"
-
-struct iwl_mvm_tof_data {
- struct iwl_tof_config_cmd tof_cfg;
- struct iwl_tof_range_req_cmd range_req;
- struct iwl_tof_range_req_ext_cmd range_req_ext;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct iwl_tof_responder_config_cmd responder_cfg;
-#endif
- struct iwl_tof_range_rsp_ntfy range_resp;
- u8 last_abort_id;
- u16 active_range_request;
-};
-
-void iwl_mvm_tof_init(struct iwl_mvm *mvm);
-void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
-int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
-int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
-int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
-int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-#endif
-#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 995fe2a6abbb..ac62eb8c4b36 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -533,10 +533,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/*
* For data packets rate info comes from the fw. Only
- * set rate/antenna during connection establishment.
+ * set rate/antenna during connection establishment or in case
+ * no station is given.
*/
- if (sta && (!ieee80211_is_data(hdr->frame_control) ||
- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
+ if (!sta || !ieee80211_is_data(hdr->frame_control) ||
+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
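
[editor's note: the hunk above widens the condition under which the host, rather than the firmware, supplies the rate/antenna: it now also applies when no station is given. A standalone sketch of that decision, with invented names and a simplified state enum, not the driver code:]

#include <stdbool.h>
#include <stdio.h>

enum sta_state { STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

/* True when the host, not the firmware, should pick rate/antenna. */
static bool host_selects_rate(bool have_sta, bool is_data,
			      enum sta_state state)
{
	return !have_sta || !is_data || state < STA_AUTHORIZED;
}

int main(void)
{
	printf("no sta, data frame         -> %d\n",
	       host_selects_rate(false, true, STA_NONE));	/* 1 */
	printf("authorized sta, data frame -> %d\n",
	       host_selects_rate(true, true, STA_AUTHORIZED));	/* 0 */
	return 0;
}
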
@@ -602,11 +603,12 @@ static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
}
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info, __le16 fc)
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr)
{
- struct iwl_mvm_vif *mvmvif;
-
- mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ __le16 fc = hdr->frame_control;
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
@@ -625,7 +627,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
(!ieee80211_is_bufferable_mmpdu(fc) ||
ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
return mvm->probe_queue;
- if (info->hw_queue == info->control.vif->cab_queue)
+
+ if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
+ is_multicast_ether_addr(hdr->addr1))
return mvmvif->cab_queue;
WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
@@ -634,8 +638,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
return mvm->p2p_dev_queue;
- if (info->hw_queue == info->control.vif->cab_queue)
- return mvmvif->cab_queue;
WARN_ON_ONCE(1);
return mvm->p2p_dev_queue;
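
[editor's note: the hunks above route frames to the broadcast/multicast (cab) queue based on the frame's receiver address (addr1) instead of the removed hw_queue comparison. A toy check of the group-address test, assuming the usual I/G-bit convention; it mirrors what is_multicast_ether_addr() does but is not the kernel helper:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A MAC address is group-addressed when the I/G bit of octet 0 is set. */
static bool mac_is_group_addressed(const uint8_t addr[6])
{
	return addr[0] & 0x01;
}

int main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const uint8_t ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("broadcast -> %d, unicast -> %d\n",
	       mac_is_group_addressed(bcast), mac_is_group_addressed(ucast));
	return 0;
}
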
@@ -713,6 +715,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
+ bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+ IEEE80211_TX_CTL_TX_OFFCHAN;
int queue = -1;
memcpy(&info, skb->cb, sizeof(info));
@@ -720,11 +724,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
return -1;
- if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
- (!info.control.vif ||
- info.hw_queue != info.control.vif->cab_queue)))
- return -1;
-
if (info.control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info.control.vif);
@@ -737,14 +736,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
else
sta_id = mvmvif->mcast_sta.sta_id;
- queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
- hdr->frame_control);
-
+ queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
+ offchannel) {
/*
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
* that can be used in 2 different types of vifs, P2P &
@@ -758,8 +755,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
}
}
- if (queue < 0)
+ if (queue < 0) {
+ IWL_ERR(mvm, "No queue was found. Dropping TX\n");
return -1;
+ }
if (unlikely(ieee80211_is_probe_resp(fc)))
iwl_mvm_probe_resp_set_noa(mvm, skb);
@@ -781,6 +780,35 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return 0;
}
+unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, unsigned int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
+ u8 ac = tid_to_mac80211_ac[tid];
+ unsigned int txf;
+ int lmac = IWL_LMAC_24G_INDEX;
+
+ if (iwl_mvm_is_cdb_supported(mvm) &&
+ band == NL80211_BAND_5GHZ)
+ lmac = IWL_LMAC_5G_INDEX;
+
+ /* For HE redirect to trigger based fifos */
+ if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+ ac += 4;
+
+ txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+
+ /*
+ * Don't send an AMSDU that will be longer than the TXF.
+ * Add a security margin of 256 for the TX command + headers.
+ * We also want to have the start of the next packet inside the
+ * fifo to be able to send bursts.
+ */
+ return min_t(unsigned int, mvmsta->max_amsdu_len,
+ mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
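
[editor's note: the function added above (now non-static) caps the A-MSDU length at the smaller of the peer's advertised maximum and the TX FIFO size minus a 256-byte margin for the TX command and headers. A standalone sketch of that arithmetic with made-up numbers:]

#include <stdio.h>

#define TXF_MARGIN 256u

static unsigned int max_amsdu_size(unsigned int sta_max_amsdu,
				   unsigned int txfifo_size)
{
	/* keep 256 bytes of headroom in the FIFO, then take the minimum */
	unsigned int fifo_cap = txfifo_size - TXF_MARGIN;

	return sta_max_amsdu < fifo_cap ? sta_max_amsdu : fifo_cap;
}

int main(void)
{
	/* e.g. peer allows 11454 bytes, FIFO is 8192 bytes */
	printf("cap = %u\n", max_amsdu_size(11454, 8192));	/* 7936 */
	return 0;
}
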
#ifdef CONFIG_INET
static int
@@ -850,36 +878,6 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
return 0;
}
-static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta,
- unsigned int tid)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
- u8 ac = tid_to_mac80211_ac[tid];
- unsigned int txf;
- int lmac = IWL_LMAC_24G_INDEX;
-
- if (iwl_mvm_is_cdb_supported(mvm) &&
- band == NL80211_BAND_5GHZ)
- lmac = IWL_LMAC_5G_INDEX;
-
- /* For HE redirect to trigger based fifos */
- if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
- ac += 4;
-
- txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
-
- /*
- * Don't send an AMSDU that will be longer than the TXF.
- * Add a security margin of 256 for the TX command + headers.
- * We also want to have the start of the next packet inside the
- * fifo to be able to send bursts.
- */
- return min_t(unsigned int, mvmsta->max_amsdu_len,
- mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
-}
-
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
@@ -1002,34 +1000,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
#endif
-static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta, u8 tid,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u8 mac_queue = info->hw_queue;
- struct sk_buff_head *deferred_tx_frames;
-
- lockdep_assert_held(&mvm_sta->lock);
-
- mvm_sta->deferred_traffic_tid_map |= BIT(tid);
- set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
-
- deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
-
- skb_queue_tail(deferred_tx_frames, skb);
-
- /*
- * The first deferred frame should've stopped the MAC queues, so we
- * should never get a second deferred frame for the RA/TID.
- * In case of GSO the first packet may have been split, so don't warn.
- */
- if (skb_queue_len(deferred_tx_frames) == 1) {
- iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
- schedule_work(&mvm->add_stream_wk);
- }
-}
-
/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
@@ -1054,7 +1024,12 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
int airtime)
{
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
- struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+ struct iwl_mvm_tcm_mac *mdata;
+
+ if (mac >= NUM_MAC_INDEX_DRIVER)
+ return;
+
+ mdata = &mvm->tcm.data[mac];
if (mvm->tcm.paused)
return;
@@ -1065,14 +1040,21 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
mdata->tx.airtime += airtime;
}
-static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvmsta, int tid)
+static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int tid)
{
u32 ac = tid_to_mac80211_ac[tid];
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
- struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+ struct iwl_mvm_tcm_mac *mdata;
+
+ if (mac >= NUM_MAC_INDEX_DRIVER)
+ return -EINVAL;
+
+ mdata = &mvm->tcm.data[mac];
mdata->tx.pkts[ac]++;
+
+ return 0;
}
/*
@@ -1088,7 +1070,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
- u16 txq_id = info->hw_queue;
+ u16 txq_id;
bool is_ampdu = false;
int hdrlen;
@@ -1152,14 +1134,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
- /* Check if TXQ needs to be allocated or re-activated */
- if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
- iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
-
- /*
- * The frame is now deferred, and the worker scheduled
- * will re-allocate it, so we can free it for now.
- */
+ if (WARN_ON_ONCE(txq_id == IWL_MVM_INVALID_QUEUE)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
@@ -1199,7 +1174,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
- iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
+ if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
+ tid == IWL_MAX_TID_COUNT ? 0 : tid))
+ goto drop;
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index d116c6ae18ff..211c4638d690 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -248,7 +248,7 @@ void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
le16_to_cpu(err_resp->bad_cmd_seq_num),
le32_to_cpu(err_resp->error_service));
- IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
+ IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
le64_to_cpu(err_resp->timestamp));
}
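
[editor's note: the one-character format fix above switches the 64-bit timestamp from space padding to zero padding. A quick userspace comparison of the two specifiers:]

#include <stdio.h>

int main(void)
{
	unsigned long long ts = 0x1234abcdULL;

	printf("old: 0x%16llX\n", ts);	/* "0x        1234ABCD" */
	printf("new: 0x%016llX\n", ts);	/* "0x000000001234ABCD" */
	return 0;
}
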
@@ -463,6 +463,9 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
sizeof(table));
+ if (table.valid)
+ mvm->fwrt.dump.umac_err_id = table.error_id;
+
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
@@ -486,11 +489,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
-static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
+static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
- u32 val;
+ u32 val, base = mvm->error_event_table[lmac_num];
if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
if (!base)
@@ -541,7 +544,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (table.valid)
- mvm->fwrt.dump.rt_status = table.error_id;
+ mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -598,10 +601,10 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
return;
}
- iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
+ iwl_mvm_dump_lmac_error_log(mvm, 0);
if (mvm->error_event_table[1])
- iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
+ iwl_mvm_dump_lmac_error_log(mvm, 1);
iwl_mvm_dump_umac_error_log(mvm);
}
@@ -1133,19 +1136,14 @@ static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
"AP isn't using AMPDU with uAPSD enabled");
}
-static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- int *mac_id = data;
if (vif->type != NL80211_IFTYPE_STATION)
return;
- if (mvmvif->id != *mac_id)
- return;
-
if (!vif->bss_conf.assoc)
return;
@@ -1155,10 +1153,10 @@ static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
!mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
return;
- if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected)
+ if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
return;
- mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true;
+ mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
IWL_INFO(mvm,
"detected AP should do aggregation but isn't, likely due to U-APSD\n");
schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
@@ -1171,6 +1169,7 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
u64 tpt;
unsigned long rate;
+ struct ieee80211_vif *vif;
rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
@@ -1199,9 +1198,11 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
return;
}
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_uapsd_agg_disconnect_iter, &mac);
+ rcu_read_lock();
+ vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
+ if (vif)
+ iwl_mvm_uapsd_agg_disconnect(mvm, vif);
+ rcu_read_unlock();
}
static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
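
[editor's note: the refactor above replaces the mac80211 interface iterator with a direct lookup keyed by MAC id (the RCU-protected vif_id_to_mac table in the driver). A simplified, non-RCU sketch of the lookup pattern, with invented types standing in for the kernel structures:]

#include <stddef.h>
#include <stdio.h>

#define NUM_MAC_IDS 4

struct vif { int id; };

/* plays the role of the driver's id-indexed vif table */
static struct vif *vif_by_id[NUM_MAC_IDS];

static struct vif *lookup_vif(int mac_id)
{
	if (mac_id < 0 || mac_id >= NUM_MAC_IDS)
		return NULL;
	return vif_by_id[mac_id];
}

int main(void)
{
	struct vif sta = { .id = 2 };

	vif_by_id[sta.id] = &sta;
	printf("id 2 -> %p, id 3 -> %p\n",
	       (void *)lookup_vif(2), (void *)lookup_vif(3));
	return 0;
}
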
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 353581ccc01e..a22e47639a4e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -513,10 +513,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -531,17 +531,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -556,20 +556,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
@@ -593,24 +594,26 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -628,17 +631,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -656,17 +659,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)},
@@ -684,18 +687,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_160_cfg_shared_clk)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
@@ -710,18 +713,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -739,17 +742,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -767,19 +770,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -805,18 +808,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -834,17 +837,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
@@ -862,41 +865,88 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
/* 22000 Series */
- {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0074, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
- {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22560_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl22560_2ax_cfg_hr)},
+
+ {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+
+ {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d6fc6ce73e0a..0d16bcc3141f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,13 +1,15 @@
/******************************************************************************
*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__
@@ -1029,8 +1065,6 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
-void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
-
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common functions that are used by gen2 transport */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 9e850c25877b..c260d1251b5f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,13 +1,15 @@
/******************************************************************************
*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
@@ -256,6 +292,9 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
}
+
+ IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
+ (u32)rxb->vid, rxq->id, rxq->write);
}
/*
@@ -860,30 +899,6 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
-void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
-{
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
- return;
-
- if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
- return;
-
- if (!trans->cfg->integrated)
- return;
-
- /*
- * Turn on the chicken-bits that cause MAC wakeup for RX-related
- * values.
- * This costs some power, but needed for W/A 9000 integrated A-step
- * bug where shadow registers are not in the retention list and their
- * value is lost when NIC powers down
- */
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
- CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
- CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
-}
-
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -971,8 +986,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- iwl_pcie_enable_rx_wake(trans, true);
}
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -1360,6 +1373,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
if (rxb->invalid)
goto out_err;
+ IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
+
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
@@ -1411,11 +1426,12 @@ restart:
emergency = true;
}
+ IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
+
rxb = iwl_pcie_get_rxb(trans, rxq, i);
if (!rxb)
goto out;
- IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
i = (i + 1) & (rxq->queue_size - 1);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f97aea5ffc44..f74281508197 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1530,8 +1530,6 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_init_done));
- iwl_pcie_enable_rx_wake(trans, false);
-
if (reset) {
/*
* reset TX queues -- some of their registers reset during S3
@@ -1558,8 +1556,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
- iwl_pcie_enable_rx_wake(trans, true);
-
iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1968,7 +1964,7 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
struct iwl_trans_pcie_removal *removal =
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
- char *prop[] = {"EVENT=INACCESSIBLE", NULL};
+ static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
dev_err(&pdev->dev, "Device gone - attempting removal\n");
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
@@ -3118,7 +3114,7 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
return len;
}
-static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
+static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
if (trans->num_blocks) {
*len += sizeof(struct iwl_fw_error_dump_data) +
@@ -3173,8 +3169,7 @@ static struct iwl_trans_dump_data
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
- u32 len, num_rbs = 0;
- u32 monitor_len;
+ u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
!trans->cfg->mq_rx_supported &&
@@ -3191,19 +3186,8 @@ static struct iwl_trans_dump_data
cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
- monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
-
- if (dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) {
- dump_data = vzalloc(len);
- if (!dump_data)
- return NULL;
-
- data = (void *)dump_data->data;
- len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
- dump_data->len = len;
-
- return dump_data;
- }
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
/* CSR registers */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
@@ -3569,24 +3553,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
- /*
- * 9000-series integrated A-step has a problem with suspend/resume
- * and sometimes even causes the whole platform to get stuck. This
- * workaround makes the hardware not go into the problematic state.
- */
- if (trans->cfg->integrated &&
- trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)
- iwl_set_bit(trans, CSR_HOST_CHICKEN,
- CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME);
+ IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
- if (cfg == &iwl22000_2ax_cfg_hr) {
+ if (cfg == &iwl22560_2ax_cfg_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- trans->cfg = &iwl22000_2ax_cfg_hr;
+ trans->cfg = &iwl22560_2ax_cfg_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
trans->cfg = &iwl22000_2ax_cfg_jf;
@@ -3602,7 +3577,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ (trans->cfg != &iwl22260_2ax_cfg ||
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 156ca1b1f621..f3d2e8fe920b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -214,7 +214,11 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+ struct iwl_tfh_tb *tb;
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
/* Each TFD can point to a maximum max_tbs Tx buffers */
if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
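
[editor's note: the WARN_ON added above validates the next TB index against the array bound before taking a pointer into tfd->tbs[]. A minimal standalone sketch of that guard; the names and the table size are invented for illustration:]

#include <stdio.h>

#define NUM_TBS 25

struct tb { unsigned int addr, len; };
struct tfd { int num_tbs; struct tb tbs[NUM_TBS]; };

static struct tb *next_tb(struct tfd *tfd)
{
	/* the driver version warns and returns -EINVAL here */
	if (tfd->num_tbs >= NUM_TBS)
		return NULL;
	return &tfd->tbs[tfd->num_tbs++];
}

int main(void)
{
	struct tfd t = { .num_tbs = NUM_TBS };

	printf("slot when full: %p\n", (void *)next_tb(&t));	/* NULL */
	return 0;
}
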
@@ -408,7 +412,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
goto out_err;
/* building the A-MSDU might have changed this data, memcpy it now */
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
return tfd;
out_err:
@@ -469,7 +473,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
/* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
@@ -834,14 +838,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
- memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
+ memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
tb0_size);
/* map first command fragment, if any remains */
if (copy_size > tb0_size) {
phys_addr = dma_map_single(trans->dev,
- ((u8 *)&out_cmd->hdr) + tb0_size,
+ (u8 *)out_cmd + tb0_size,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index ee990a7a5411..07395502f419 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,13 +1,15 @@
/******************************************************************************
*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,12 +20,46 @@
* more details.
*
* The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
@@ -2438,8 +2474,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* building the A-MSDU might have changed this data, so memcpy it now */
- memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
diff --git a/drivers/net/wireless/marvell/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c
index c83f44f9ddf1..fe14814af300 100644
--- a/drivers/net/wireless/marvell/libertas/debugfs.c
+++ b/drivers/net/wireless/marvell/libertas/debugfs.c
@@ -708,8 +708,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
goto exit;
priv->debugfs_dir = debugfs_create_dir(dev->name, lbs_dir);
- if (!priv->debugfs_dir)
- goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_files); i++) {
files = &debugfs_files[i];
@@ -721,8 +719,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
}
priv->events_dir = debugfs_create_dir("subscribed_events", priv->debugfs_dir);
- if (!priv->events_dir)
- goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_events_files); i++) {
files = &debugfs_events_files[i];
@@ -734,8 +730,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
}
priv->regs_dir = debugfs_create_dir("registers", priv->debugfs_dir);
- if (!priv->regs_dir)
- goto exit;
for (i=0; i<ARRAY_SIZE(debugfs_regs_files); i++) {
files = &debugfs_regs_files[i];
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index b0cb16ef8d1d..2315fdff56c2 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -797,7 +797,12 @@ static void lbs_persist_config_init(struct net_device *dev)
{
int ret;
ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
+ if (ret)
+ pr_err("failed to create boot_opts_group.\n");
+
ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
+ if (ret)
+ pr_err("failed to create mesh_ie_group.\n");
}
static void lbs_persist_config_remove(struct net_device *dev)
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 1d45da187b9b..a7cb7d06e5e6 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -676,7 +676,7 @@ int lbtf_remove_card(struct lbtf_private *priv)
ieee80211_unregister_hw(hw);
ieee80211_free_hw(hw);
- lbtf_deb_leave(LBTF_DEB_MAIN);
+ lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_remove_card);
diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig
index 279167ddd293..524fd565cb2a 100644
--- a/drivers/net/wireless/marvell/mwifiex/Kconfig
+++ b/drivers/net/wireless/marvell/mwifiex/Kconfig
@@ -9,7 +9,7 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index cbe4493b3266..8ab114cf3467 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -922,9 +922,8 @@ mwifiex_reset_write(struct file *file,
}
#define MWIFIEX_DFS_ADD_FILE(name) do { \
- if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \
- priv, &mwifiex_dfs_##name##_fops)) \
- return; \
+ debugfs_create_file(#name, 0644, priv->dfs_dev_dir, priv, \
+ &mwifiex_dfs_##name##_fops); \
} while (0);
#define MWIFIEX_DFS_FILE_OPS(name) \
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index d49fbd58afa7..a85648342d15 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -489,6 +489,8 @@ static void mwifiex_sdio_coredump(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135)
/* Device ID for SD8801 */
#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
+/* Device ID for SD8977 */
+#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145)
/* Device ID for SD8997 */
#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)
@@ -507,6 +509,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8977},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
@@ -2726,4 +2730,5 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index dccf7fd1aef3..912de2cde8d9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -36,6 +36,7 @@
#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
#define BLOCK_MODE 1
@@ -371,6 +372,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
0x59, 0x5c, 0x5d},
};
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xe8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+ 0x60, 0x61, 0x62, 0x64,
+ 0x65, 0x66, 0x68, 0x69,
+ 0x6a},
+};
+
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
.start_rd_port = 0,
.start_wr_port = 0,
@@ -532,6 +586,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.can_ext_scan = true,
};
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
+ .firmware = SD8977_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8977,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.firmware = SD8997_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd8997,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index e2ba26378575..e769c8a555dd 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -300,7 +300,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
goto unmap;
- return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+ return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
unmap:
ret = -ENOMEM;
@@ -318,7 +318,7 @@ free:
EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
dma_addr_t addr;
void *buf;
@@ -392,7 +392,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q, false);
+ mt76_dma_rx_fill(dev, q);
}
static void
@@ -417,10 +417,9 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
+ int len, data_len, done = 0;
struct sk_buff *skb;
unsigned char *data;
- int len;
- int done = 0;
bool more;
while (done < budget) {
@@ -430,6 +429,19 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (!data)
break;
+ if (q->rx_head)
+ data_len = q->buf_size;
+ else
+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ if (data_len < len + q->buf_offset) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+
+ skb_free_frag(data);
+ continue;
+ }
+
if (q->rx_head) {
mt76_add_fragment(dev, q, data, len, more);
continue;
@@ -440,12 +452,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
skb_free_frag(data);
continue;
}
-
skb_reserve(skb, q->buf_offset);
- if (skb->tail + len > skb->end) {
- dev_kfree_skb(skb);
- continue;
- }
if (q == &dev->q_rx[MT_RXQ_MCU]) {
u32 *rxfce = (u32 *) skb->cb;
@@ -463,7 +470,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
- mt76_dma_rx_fill(dev, q, true);
+ mt76_dma_rx_fill(dev, q);
return done;
}
@@ -504,7 +511,7 @@ mt76_dma_init(struct mt76_dev *dev)
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
- mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+ mt76_dma_rx_fill(dev, &dev->q_rx[i]);
skb_queue_head_init(&dev->rx_skb[i]);
napi_enable(&dev->napi[i]);
}
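
The added length check in mt76_dma_rx_process() drops a received buffer when its payload cannot fit in the usable part of the RX buffer: a head fragment has to leave room for the skb bookkeeping (SKB_WITH_OVERHEAD()), while a continuation fragment attached to q->rx_head may use the whole buffer. A small sketch of that decision, with the buffer size and metadata overhead chosen only for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's queue fields; the values are illustrative. */
#define BUF_SIZE     2048
#define SKB_OVERHEAD 320	/* assumed shared-info overhead */

static bool rx_len_ok(bool head_fragment, int len, int buf_offset)
{
	/* A head fragment must leave room for skb metadata at the end of
	 * the buffer, so fewer bytes are usable than for a continuation
	 * fragment, which may fill the whole buffer. */
	int usable = head_fragment ? BUF_SIZE - SKB_OVERHEAD : BUF_SIZE;

	return len + buf_offset <= usable;
}

int main(void)
{
	printf("%d\n", rx_len_ok(true, 1900, 32));	/* 0: rejected */
	printf("%d\n", rx_len_ok(false, 1900, 32));	/* 1: accepted */
	return 0;
}
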
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 7b926dfa6b97..ee3b65a14870 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -328,6 +328,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -547,7 +548,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
}
static void
-mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
+mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -566,6 +567,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv);
+ if (status->signal <= 0)
+ ewma_signal_add(&wcid->rssi, -status->signal);
+
+ wcid->inactive_count = 0;
+
if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
return;
@@ -625,7 +631,7 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
__skb_queue_head_init(&frames);
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
- mt76_check_ps(dev, skb);
+ mt76_check_sta(dev, skb);
mt76_rx_aggr_reorder(skb, &frames);
}
@@ -659,6 +665,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
mt76_txq_init(dev, sta->txq[i]);
}
+ ewma_signal_init(&wcid->rssi);
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
out:
@@ -709,3 +716,60 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
+
+int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct mt76_dev *dev = hw->priv;
+ int n_chains = __sw_hweight8(dev->antenna_mask);
+
+ *dbm = dev->txpower_cur / 2;
+
+ /* convert from per-chain power to combined
+ * output on 2x2 devices
+ */
+ if (n_chains > 1)
+ *dbm += 3;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_get_txpower);
+
+static void
+__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ ieee80211_csa_finish(vif);
+}
+
+void mt76_csa_finish(struct mt76_dev *dev)
+{
+ if (!dev->csa_complete)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(dev->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ __mt76_csa_finish, dev);
+
+ dev->csa_complete = 0;
+}
+EXPORT_SYMBOL_GPL(mt76_csa_finish);
+
+static void
+__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76_dev *dev = priv;
+
+ if (!vif->csa_active)
+ return;
+
+ dev->csa_complete |= ieee80211_csa_is_complete(vif);
+}
+
+void mt76_csa_check(struct mt76_dev *dev)
+{
+ ieee80211_iterate_active_interfaces_atomic(dev->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ __mt76_csa_check, dev);
+}
+EXPORT_SYMBOL_GPL(mt76_csa_check);
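
mt76_get_txpower() reports combined output power: the driver keeps txpower_cur in half-dB steps per chain and adds 3 dB when more than one chain transmits, which is the rounded form of the general 10*log10(n_chains) adjustment for equal-power chains. A sketch of that conversion (rounding the general formula to whole dB is an assumption made for illustration):

#include <math.h>
#include <stdio.h>

/* txpower_half_db is the per-chain power in half-dB steps, as kept in
 * txpower_cur; the result is the combined output in whole dBm. */
static int combined_txpower_dbm(int txpower_half_db, int n_chains)
{
	int dbm = txpower_half_db / 2;

	/* combining N equal-power chains adds 10*log10(N) dB,
	 * which the driver rounds to +3 dB for the 2-chain case */
	if (n_chains > 1)
		dbm += (int)round(10.0 * log10(n_chains));

	return dbm;
}

int main(void)
{
	printf("%d dBm\n", combined_txpower_dbm(34, 2));	/* 17 + 3 = 20 dBm */
	return 0;
}
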
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 5cd508a68609..2bb9db4ed80a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -23,6 +23,7 @@
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
+#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"
@@ -174,6 +175,8 @@ enum mt76_wcid_flags {
#define MT76_N_WCIDS 128
+DECLARE_EWMA(signal, 10, 8);
+
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
@@ -181,6 +184,9 @@ struct mt76_wcid {
unsigned long flags;
+ struct ewma_signal rssi;
+ int inactive_count;
+
u8 idx;
u8 hw_key_idx;
@@ -239,7 +245,9 @@ struct mt76_rx_tid {
#define MT_TX_CB_TXS_FAILED BIT(2)
#define MT_PACKET_ID_MASK GENMASK(7, 0)
-#define MT_PACKET_ID_NO_ACK MT_PACKET_ID_MASK
+#define MT_PACKET_ID_NO_ACK 0
+#define MT_PACKET_ID_NO_SKB 1
+#define MT_PACKET_ID_FIRST 2
#define MT_TX_STATUS_SKB_TIMEOUT HZ
@@ -421,6 +429,7 @@ struct mt76_dev {
struct mt76_queue q_tx[__MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
+ int tx_dma_idx[4];
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
@@ -454,6 +463,8 @@ struct mt76_dev {
bool led_al;
u8 led_pin;
+ u8 csa_complete;
+
u32 rxfilter;
union {
@@ -488,7 +499,7 @@ struct mt76_rx_status {
u8 rate_idx;
u8 nss;
u8 band;
- u8 signal;
+ s8 signal;
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
@@ -677,6 +688,14 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev);
+
+int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int *dbm);
+
+void mt76_csa_check(struct mt76_dev *dev);
+void mt76_csa_finish(struct mt76_dev *dev);
+
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
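
The packet-ID rework reserves the values 0 and 1 (MT_PACKET_ID_NO_ACK and MT_PACKET_ID_NO_SKB) so that mt76_tx_status_skb_add() can signal "no status expected" and "status without an skb" without colliding with a real status cookie; live IDs wrap within MT_PACKET_ID_MASK and restart at MT_PACKET_ID_FIRST, as the tx.c hunk further below shows. A sketch of that allocator using the same constants:

#include <stdio.h>

#define PACKET_ID_MASK   0xff
#define PACKET_ID_NO_ACK 0
#define PACKET_ID_NO_SKB 1
#define PACKET_ID_FIRST  2

/* Per-station counter; wraps within the 8-bit mask and skips the two
 * reserved values so a real cookie can never collide with them. */
static unsigned int next_packet_id(unsigned int *counter)
{
	*counter = (*counter + 1) & PACKET_ID_MASK;
	if (*counter == PACKET_ID_NO_ACK || *counter == PACKET_ID_NO_SKB)
		*counter = PACKET_ID_FIRST;
	return *counter;
}

int main(void)
{
	unsigned int pid = PACKET_ID_MASK;	/* force a wrap */

	printf("%u\n", next_packet_id(&pid));	/* 2: skipped the reserved ids */
	printf("%u\n", next_packet_id(&pid));	/* 3 */
	return 0;
}
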
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index a1657922758e..0290ba5869a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -88,6 +88,7 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
{ MT_TX_PROT_CFG6, 0xe3f42004 },
{ MT_TX_PROT_CFG7, 0xe3f42084 },
{ MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
};
static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index d895b6f3dc44..1472c8699b29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -99,7 +99,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.wake_tx_queue = mt76_wake_tx_queue,
.get_survey = mt76_get_survey,
- .get_txpower = mt76x02_get_txpower,
+ .get_txpower = mt76_get_txpower,
.flush = mt76x0e_flush,
.set_tim = mt76x0e_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
@@ -141,6 +141,15 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+ mt76_wr(dev, MT_CH_TIME_CFG,
+ MT_CH_TIME_CFG_TIMER_EN |
+ MT_CH_TIME_CFG_TX_AS_BUSY |
+ MT_CH_TIME_CFG_RX_AS_BUSY |
+ MT_CH_TIME_CFG_NAV_AS_BUSY |
+ MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ MT_CH_CCA_RC_EN |
+ FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
err = mt76x0_register_device(dev);
if (err < 0)
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 1eb1a802ed20..5a4c6f34267e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -1013,6 +1013,8 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
mt76x0_phy_calibrate(dev, false);
mt76x0_phy_set_txpower(dev);
+ mt76x02_edcca_init(dev);
+
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
@@ -1075,7 +1077,9 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
u8 gain_delta;
int low_gain;
- dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ if (!dev->cal.avg_rssi_all)
+ dev->cal.avg_rssi_all = -75;
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0e6b43bb4678..f66e1b2f0980 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -155,7 +155,7 @@ static const struct ieee80211_ops mt76x0u_ops = {
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.set_rts_threshold = mt76x02_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
- .get_txpower = mt76x02_get_txpower,
+ .get_txpower = mt76_get_txpower,
};
static int mt76x0u_register_device(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index 9d7585029df9..f391d2d21fbc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
+#include <linux/module.h>
#include "mt76x0.h"
#include "mcu.h"
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 6782665049dd..6d96766a6ed3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -15,8 +15,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __MT76X02_UTIL_H
-#define __MT76X02_UTIL_H
+#ifndef __MT76x02_H
+#define __MT76x02_H
#include <linux/kfifo.h>
@@ -28,6 +28,9 @@
#define MT_CALIBRATE_INTERVAL HZ
+#define MT_WATCHDOG_TIME (HZ / 10)
+#define MT_TX_HANG_TH 10
+
#define MT_MAX_CHAINS 2
struct mt76x02_rx_freq_cal {
s8 high_gain[MT_MAX_CHAINS];
@@ -79,6 +82,7 @@ struct mt76x02_dev {
struct tasklet_struct pre_tbtt_tasklet;
struct delayed_work cal_work;
struct delayed_work mac_work;
+ struct delayed_work wdt_work;
u32 aggr_stats[32];
@@ -89,6 +93,9 @@ struct mt76x02_dev {
u8 tbtt_count;
u16 beacon_int;
+ u32 tx_hang_reset;
+ u8 tx_hang_check;
+
struct mt76x02_calibration cal;
s8 target_power;
@@ -101,6 +108,12 @@ struct mt76x02_dev {
u8 slottime;
struct mt76x02_dfs_pattern_detector dfs_pd;
+
+ /* edcca monitor */
+ bool ed_tx_blocked;
+ bool ed_monitor;
+ u8 ed_trigger;
+ u8 ed_silent;
};
extern struct ieee80211_rate mt76x02_rates[12];
@@ -136,6 +149,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate);
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
s8 max_txpwr_adj);
+void mt76x02_wdt_work(struct work_struct *work);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
@@ -158,8 +172,6 @@ void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-int mt76x02_get_txpower(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int *dbm);
void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -224,4 +236,4 @@ mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
return &sta->vif->group_wcid;
}
-#endif
+#endif /* __MT76x02_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index a9d52ba1e270..7580c5c986ff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -133,5 +133,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
read_txpower);
debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
+
+ debugfs_create_u32("tx_hang_reset", 0400, dir, &dev->tx_hang_reset);
}
EXPORT_SYMBOL_GPL(mt76x02_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 054609c634a2..19fdcab746a0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -881,12 +881,18 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ mutex_lock(&dev->mt76.mutex);
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
+
+ dev->ed_monitor = region == NL80211_DFS_ETSI;
+ mt76x02_edcca_init(dev);
+
dfs_pd->region = region;
mt76x02_dfs_init_params(dev);
tasklet_enable(&dfs_pd->dfs_tasklet);
}
+ mutex_unlock(&dev->mt76.mutex);
}
void mt76x02_regd_notifier(struct wiphy *wiphy,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index c08bf371e527..63fa27d2c404 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -130,10 +130,8 @@ static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
+ u8 phy, rate_idx, nss, bw = 0;
u16 rateval;
- u8 phy, rate_idx;
- u8 nss = 1;
- u8 bw = 0;
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
rate_idx = rate->idx;
@@ -164,7 +162,7 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
phy = val >> 8;
rate_idx = val & 0xff;
- bw = 0;
+ nss = 1;
}
rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
@@ -435,7 +433,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
}
if (wcid) {
- if (stat->pktid)
+ if (stat->pktid >= MT_PACKET_ID_FIRST)
status.skb = mt76_tx_status_skb_get(mdev, wcid,
stat->pktid, &list);
if (status.skb)
@@ -478,7 +476,9 @@ out:
}
static int
-mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+mt76x02_mac_process_rate(struct mt76x02_dev *dev,
+ struct mt76_rx_status *status,
+ u16 rate)
{
u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
@@ -510,11 +510,15 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
status->encoding = RX_ENC_HT;
status->rate_idx = idx;
break;
- case MT_PHY_TYPE_VHT:
+ case MT_PHY_TYPE_VHT: {
+ u8 n_rxstream = dev->mt76.chainmask & 0xf;
+
status->encoding = RX_ENC_VHT;
status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
- status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+ status->nss = min_t(u8, n_rxstream,
+ FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1);
break;
+ }
default:
return -EINVAL;
}
@@ -644,7 +648,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->chains = BIT(0);
signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
- for (i = 1; i < nstreams; i++) {
+ for (i = 0; i < nstreams; i++) {
status->chains |= BIT(i);
status->chain_signal[i] = mt76x02_mac_get_rssi(dev,
rxwi->rssi[i],
@@ -658,12 +662,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
- if (sta) {
- ewma_signal_add(&sta->rssi, status->signal);
- sta->inactive_count = 0;
- }
-
- return mt76x02_mac_process_rate(status, rate);
+ return mt76x02_mac_process_rate(dev, status, rate);
}
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
@@ -715,7 +714,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
-void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
+void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
{
u32 data = 0;
@@ -729,20 +728,89 @@ void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
mt76_rmw(dev, MT_OFDM_PROT_CFG,
MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_MM20_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_MM40_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_GF20_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_GF40_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG6,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG7,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG8,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
+
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ u32 vht_prot[3];
+ int i;
+ u16 rts_thr;
+
+ for (i = 0; i < ARRAY_SIZE(prot); i++) {
+ prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
+ prot[i] &= ~MT_PROT_CFG_CTRL;
+ if (i >= 2)
+ prot[i] &= ~MT_PROT_CFG_RATE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vht_prot); i++) {
+ vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
+ vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE);
+ }
+
+ rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);
+
+ if (rts_thr != 0xffff)
+ prot[0] |= MT_PROT_CTRL_RTS_CTS;
+
+ if (legacy_prot) {
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+
+ vht_prot[0] |= MT_PROT_RATE_CCK_11;
+ vht_prot[1] |= MT_PROT_RATE_CCK_11;
+ vht_prot[2] |= MT_PROT_RATE_CCK_11;
+ } else {
+ if (rts_thr != 0xffff)
+ prot[1] |= MT_PROT_CTRL_RTS_CTS;
+
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+
+ vht_prot[0] |= MT_PROT_RATE_OFDM_24;
+ vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24;
+ vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ prot[3] |= MT_PROT_CTRL_RTS_CTS;
+ prot[4] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[0] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ prot[3] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[1] |= MT_PROT_CTRL_RTS_CTS;
+ vht_prot[2] |= MT_PROT_CTRL_RTS_CTS;
+ break;
+ }
+
+ if (non_gf) {
+ prot[4] |= MT_PROT_CTRL_RTS_CTS;
+ prot[5] |= MT_PROT_CTRL_RTS_CTS;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(prot); i++)
+ mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vht_prot); i++)
+ mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
}
void mt76x02_update_channel(struct mt76_dev *mdev)
@@ -774,8 +842,90 @@ static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
- mt76_clear(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
+{
+ if (enable) {
+ u32 data;
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
+ /* enable pa-lna */
+ data = mt76_rr(dev, MT_TX_PIN_CFG);
+ data |= MT_TX_PIN_CFG_TXANT |
+ MT_TX_PIN_CFG_RXANT |
+ MT_TX_PIN_RFTR_EN |
+ MT_TX_PIN_TRSW_EN;
+ mt76_wr(dev, MT_TX_PIN_CFG, data);
+ } else {
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
+ /* disable pa-lna */
+ mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
+ mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
+ }
+ dev->ed_tx_blocked = !enable;
+}
+
+void mt76x02_edcca_init(struct mt76x02_dev *dev)
+{
+ dev->ed_trigger = 0;
+ dev->ed_silent = 0;
+
+ if (dev->ed_monitor) {
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
+
+ mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
+ mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
+ ed_th << 8 | ed_th);
+ if (!is_mt76x2(dev))
+ mt76_set(dev, MT_TXOP_HLDR_ET,
+ MT_TXOP_HLDR_TX40M_BLK_EN);
+ } else {
+ mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ if (is_mt76x2(dev)) {
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+ } else {
+ mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
+ mt76_clear(dev, MT_TXOP_HLDR_ET,
+ MT_TXOP_HLDR_TX40M_BLK_EN);
+ }
+ }
+ mt76x02_edcca_tx_enable(dev, true);
+}
+EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
+
+#define MT_EDCCA_TH 90
+#define MT_EDCCA_BLOCK_TH 2
+static void mt76x02_edcca_check(struct mt76x02_dev *dev)
+{
+ u32 val, busy;
+
+ val = mt76_rr(dev, MT_ED_CCA_TIMER);
+ busy = (val * 100) / jiffies_to_usecs(MT_CALIBRATE_INTERVAL);
+ busy = min_t(u32, busy, 100);
+
+ if (busy > MT_EDCCA_TH) {
+ dev->ed_trigger++;
+ dev->ed_silent = 0;
+ } else {
+ dev->ed_silent++;
+ dev->ed_trigger = 0;
+ }
+
+ if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
+ !dev->ed_tx_blocked)
+ mt76x02_edcca_tx_enable(dev, false);
+ else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
+ dev->ed_tx_blocked)
+ mt76x02_edcca_tx_enable(dev, true);
}
void mt76x02_mac_work(struct work_struct *work)
@@ -784,6 +934,8 @@ void mt76x02_mac_work(struct work_struct *work)
mac_work.work);
int i, idx;
+ mutex_lock(&dev->mt76.mutex);
+
mt76x02_update_channel(&dev->mt76);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
@@ -792,10 +944,14 @@ void mt76x02_mac_work(struct work_struct *work)
dev->aggr_stats[idx++] += val >> 16;
}
- /* XXX: check beacon stuck for ap mode */
if (!dev->beacon_mask)
mt76x02_check_mac_err(dev);
+ if (dev->ed_monitor)
+ mt76x02_edcca_check(dev);
+
+ mutex_unlock(&dev->mt76.mutex);
+
mt76_tx_status_check(&dev->mt76, NULL, false);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
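
The EDCCA monitor converts the MT_ED_CCA_TIMER reading into a busy percentage of the one-second calibration interval and applies hysteresis: TX is blocked only after more than MT_EDCCA_BLOCK_TH consecutive intervals above MT_EDCCA_TH percent, and re-enabled only after the same number of quiet intervals. A self-contained sketch of that logic with the driver's thresholds:

#include <stdbool.h>
#include <stdio.h>

#define EDCCA_TH       90	/* percent of the interval spent blocked */
#define EDCCA_BLOCK_TH 2	/* consecutive intervals before toggling */

struct edcca_state {
	int trigger;		/* consecutive busy intervals  */
	int silent;		/* consecutive quiet intervals */
	bool tx_blocked;
};

/* timer_us: time counted by the ED-CCA timer in the last interval,
 * interval_us: length of that interval (one second in the driver). */
static void edcca_check(struct edcca_state *s, unsigned int timer_us,
			unsigned int interval_us)
{
	unsigned int busy = timer_us * 100u / interval_us;

	if (busy > 100)
		busy = 100;

	if (busy > EDCCA_TH) {
		s->trigger++;
		s->silent = 0;
	} else {
		s->silent++;
		s->trigger = 0;
	}

	/* Toggle only after the condition persists, so a single noisy
	 * interval does not flap the transmitter. */
	if (s->trigger > EDCCA_BLOCK_TH && !s->tx_blocked)
		s->tx_blocked = true;
	else if (s->silent > EDCCA_BLOCK_TH && s->tx_blocked)
		s->tx_blocked = false;
}

int main(void)
{
	struct edcca_state s = { 0 };
	int i;

	for (i = 0; i < 4; i++)
		edcca_check(&s, 950000, 1000000);	/* 95% busy */
	printf("tx_blocked=%d\n", s.tx_blocked);	/* 1 after the third interval */
	return 0;
}
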
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 4e597004c445..940c07f665cd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -18,8 +18,6 @@
#ifndef __MT76X02_MAC_H
#define __MT76X02_MAC_H
-#include <linux/average.h>
-
struct mt76x02_dev;
struct mt76x02_tx_status {
@@ -41,8 +39,6 @@ struct mt76x02_vif {
u8 idx;
};
-DECLARE_EWMA(signal, 10, 8);
-
struct mt76x02_sta {
struct mt76_wcid wcid; /* must be first */
@@ -50,8 +46,6 @@ struct mt76x02_sta {
struct mt76x02_tx_status status;
int n_frames;
- struct ewma_signal rssi;
- int inactive_count;
};
#define MT_RXINFO_BA BIT(0)
@@ -194,7 +188,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat, u8 *update);
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void *rxi);
-void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val);
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
@@ -210,4 +206,6 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb);
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
bool val);
+
+void mt76x02_edcca_init(struct mt76x02_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 66315410aebe..374bc9d91f12 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -116,14 +116,20 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
+ mt76_csa_check(&dev->mt76);
+
+ if (dev->mt76.csa_complete)
+ return;
+
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_add_buffered_bc, &data);
- } while (nframes != skb_queue_len(&data.q));
+ } while (nframes != skb_queue_len(&data.q) &&
+ skb_queue_len(&data.q) < 8);
- if (!nframes)
+ if (!skb_queue_len(&data.q))
return;
for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
@@ -308,8 +314,12 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
tasklet_schedule(&dev->pre_tbtt_tasklet);
/* send buffered multicast frames now */
- if (intr & MT_INT_TBTT)
- mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+ if (intr & MT_INT_TBTT) {
+ if (dev->mt76.csa_complete)
+ mt76_csa_finish(&dev->mt76);
+ else
+ mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+ }
if (intr & MT_INT_TX_STAT) {
mt76x02_mac_poll_tx_status(dev, true);
@@ -384,3 +394,127 @@ void mt76x02_mac_start(struct mt76x02_dev *dev)
MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);
+
+static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
+{
+ u32 dma_idx, prev_dma_idx;
+ struct mt76_queue *q;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ q = &dev->mt76.q_tx[i];
+
+ if (!q->queued)
+ continue;
+
+ prev_dma_idx = dev->mt76.tx_dma_idx[i];
+ dma_idx = ioread32(&q->regs->dma_idx);
+ dev->mt76.tx_dma_idx[i] = dma_idx;
+
+ if (prev_dma_idx == dma_idx)
+ break;
+ }
+
+ return i < 4;
+}
+
+static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
+{
+ u32 mask = dev->mt76.mmio.irqmask;
+ int i;
+
+ ieee80211_stop_queues(dev->mt76.hw);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->tx_tasklet);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
+ napi_disable(&dev->mt76.napi[i]);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->beacon_mask)
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ mt76x02_irq_disable(dev, mask);
+
+ /* perform device reset */
+ mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ usleep_range(5000, 10000);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);
+
+ /* let fw reset DMA */
+ mt76_set(dev, 0x734, 0x3);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_queue_rx_reset(dev, i);
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ if (dev->ed_monitor)
+ mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
+
+ if (dev->beacon_mask)
+ mt76_set(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ mt76x02_irq_enable(dev, mask);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+
+ tasklet_enable(&dev->tx_tasklet);
+ tasklet_schedule(&dev->tx_tasklet);
+
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+
+ ieee80211_wake_queues(dev->mt76.hw);
+
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
+{
+ if (mt76x02_tx_hang(dev)) {
+ if (++dev->tx_hang_check < MT_TX_HANG_TH)
+ return;
+
+ mt76x02_watchdog_reset(dev);
+
+ dev->tx_hang_reset++;
+ dev->tx_hang_check = 0;
+ memset(dev->mt76.tx_dma_idx, 0xff,
+ sizeof(dev->mt76.tx_dma_idx));
+ } else {
+ dev->tx_hang_check = 0;
+ }
+}
+
+void mt76x02_wdt_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ wdt_work.work);
+
+ mt76x02_check_tx_hang(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
+ MT_WATCHDOG_TIME);
+}
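
The watchdog treats a TX queue as stuck when it still holds queued frames but the DMA index read back from the ring has not moved since the previous poll; only after MT_TX_HANG_TH consecutive positive polls (ten polls at HZ/10, roughly one second) does it run the full mt76x02_watchdog_reset() path. A compact sketch of the detection half, with plain structs standing in for the queue state purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_QUEUES 4
#define TX_HANG_TH    10	/* ~1 s with a poll every HZ/10 */

struct txq_snap {
	unsigned int queued;	/* frames the driver still owns    */
	unsigned int dma_idx;	/* descriptor index the HW reports */
};

/* A queue is stuck when it has frames queued but the hardware index has
 * not advanced since the previous poll. */
static bool tx_hang(const struct txq_snap *now, unsigned int *prev_dma_idx)
{
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		unsigned int prev = prev_dma_idx[i];

		prev_dma_idx[i] = now[i].dma_idx;

		if (now[i].queued && prev == now[i].dma_idx)
			return true;
	}
	return false;
}

int main(void)
{
	struct txq_snap q[NUM_TX_QUEUES] = { { 3, 7 }, { 0 }, { 0 }, { 0 } };
	unsigned int prev[NUM_TX_QUEUES] = { 0 };
	int hang_count = 0, i;

	/* first poll only records the index; the following ten see it stall */
	for (i = 0; i <= TX_HANG_TH; i++)
		if (tx_hang(q, prev))
			hang_count++;

	printf("hang polls: %d -> reset: %s\n", hang_count,
	       hang_count >= TX_HANG_TH ? "yes" : "not yet");
	return 0;
}
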
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index 977a8e7e26df..a020c757ba5c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -132,53 +132,6 @@ void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
-int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev)
-{
- struct mt76x02_sta *sta;
- struct mt76_wcid *wcid;
- int i, j, min_rssi = 0;
- s8 cur_rssi;
-
- local_bh_disable();
- rcu_read_lock();
-
- for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
- unsigned long mask = dev->mt76.wcid_mask[i];
-
- if (!mask)
- continue;
-
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
- if (!(mask & 1))
- continue;
-
- wcid = rcu_dereference(dev->mt76.wcid[j]);
- if (!wcid)
- continue;
-
- sta = container_of(wcid, struct mt76x02_sta, wcid);
- spin_lock(&dev->mt76.rx_lock);
- if (sta->inactive_count++ < 5)
- cur_rssi = ewma_signal_read(&sta->rssi);
- else
- cur_rssi = 0;
- spin_unlock(&dev->mt76.rx_lock);
-
- if (cur_rssi < min_rssi)
- min_rssi = cur_rssi;
- }
- }
-
- rcu_read_unlock();
- local_bh_enable();
-
- if (!min_rssi)
- return -75;
-
- return min_rssi;
-}
-EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
-
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
{
int core_val, agc_val;
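
The per-station average that replaces the deleted helper is kept by DECLARE_EWMA(signal, 10, 8): ten fractional bits of precision and a reciprocal weight of 8, so each new sample contributes one eighth of the updated average (mt76_check_sta() feeds -signal, i.e. positive magnitudes, for negative dBm readings). A rough standalone approximation of that arithmetic, not the kernel macro itself:

#include <stdio.h>

#define EWMA_PRECISION  10	/* fractional bits */
#define EWMA_WEIGHT_RCP 8	/* reciprocal weight, power of two */

struct ewma_signal { unsigned long internal; };

/* new = (old * (w - 1) + sample) / w, seeded with the first sample */
static void ewma_add(struct ewma_signal *e, unsigned long val)
{
	unsigned long scaled = val << EWMA_PRECISION;

	if (!e->internal)
		e->internal = scaled;
	else
		e->internal = (e->internal * (EWMA_WEIGHT_RCP - 1) + scaled) /
			      EWMA_WEIGHT_RCP;
}

static long ewma_read(const struct ewma_signal *e)
{
	return e->internal >> EWMA_PRECISION;
}

int main(void)
{
	struct ewma_signal rssi = { 0 };
	int samples[] = { 40, 44, 60, 42 };	/* -RSSI magnitudes */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		ewma_add(&rssi, samples[i]);

	printf("avg ~ -%ld dBm\n", ewma_read(&rssi));
	return 0;
}
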
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index 2b316cf7c70c..d2971db06f13 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -51,7 +51,6 @@ void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
-int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev);
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
bool primary_upper);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index f7de77d09d28..7401cb94fb72 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -230,6 +230,29 @@
#define MT_COM_REG2 0x0738
#define MT_COM_REG3 0x073C
+#define MT_LED_CTRL 0x0770
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0 0x0774
+#define MT_LED_TX_BLINK_1 0x0778
+
+#define MT_LED_S0_BASE 0x077C
+#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE 0x0780
+#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+ MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+ MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+ MT_LED_STATUS_DURATION_MASK)
+
#define MT_FCE_PSE_CTRL 0x0800
#define MT_FCE_PARAMETERS 0x0804
#define MT_FCE_CSO 0x0808
@@ -318,6 +341,7 @@
#define MT_CH_TIME_CFG_NAV_AS_BUSY BIT(3)
#define MT_CH_TIME_CFG_EIFS_AS_BUSY BIT(4)
#define MT_CH_TIME_CFG_MDRDY_CNT_EN BIT(5)
+#define MT_CH_CCA_RC_EN BIT(6)
#define MT_CH_TIME_CFG_CH_TIMER_CLR GENMASK(9, 8)
#define MT_CH_TIME_CFG_MDRDY_CLR GENMASK(11, 10)
@@ -378,6 +402,9 @@
#define MT_TX_PWR_CFG_4 0x1324
#define MT_TX_PIN_CFG 0x1328
#define MT_TX_PIN_CFG_TXANT GENMASK(3, 0)
+#define MT_TX_PIN_CFG_RXANT GENMASK(11, 8)
+#define MT_TX_PIN_RFTR_EN BIT(16)
+#define MT_TX_PIN_TRSW_EN BIT(18)
#define MT_TX_BAND_CFG 0x132c
#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
@@ -398,6 +425,7 @@
#define MT_TXOP_CTRL_CFG 0x1340
#define MT_TXOP_TRUN_EN GENMASK(5, 0)
#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+#define MT_TXOP_ED_CCA_EN BIT(20)
#define MT_TX_RTS_CFG 0x1344
#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
@@ -409,6 +437,7 @@
#define MT_TX_RETRY_CFG 0x134c
#define MT_TX_LINK_CFG 0x1350
+#define MT_TX_CFACK_EN BIT(12)
#define MT_VHT_HT_FBK_CFG0 0x1354
#define MT_VHT_HT_FBK_CFG1 0x1358
#define MT_LG_FBK_CFG0 0x135c
@@ -440,9 +469,10 @@
#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
#define MT_PROT_RTS_THR_EN BIT(26)
#define MT_PROT_RATE_CCK_11 0x0003
-#define MT_PROT_RATE_OFDM_6 0x4000
-#define MT_PROT_RATE_OFDM_24 0x4004
-#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_RATE_OFDM_6 0x2000
+#define MT_PROT_RATE_OFDM_24 0x2004
+#define MT_PROT_RATE_DUP_OFDM_24 0x2084
+#define MT_PROT_RATE_SGI_OFDM_24 0x2104
#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
~MT_PROT_TXOP_ALLOW_MM40 & \
@@ -511,6 +541,7 @@
#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
#define MT_AUTO_RSP_CFG 0x1404
+#define MT_AUTO_RSP_EN BIT(0)
#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
#define MT_LEGACY_BASIC_RATE 0x1408
#define MT_HT_BASIC_RATE 0x140c
@@ -532,6 +563,7 @@
#define MT_PN_PAD_MODE 0x150c
#define MT_TXOP_HLDR_ET 0x1608
+#define MT_TXOP_HLDR_TX40M_BLK_EN BIT(1)
#define MT_PROT_AUTO_TX_CFG 0x1648
#define MT_PROT_AUTO_TX_CFG_PROT_PADJ GENMASK(11, 8)
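
The MT_LED_STATUS_*() helpers moved into this header build a register word by shifting a value to the lowest set bit of the field mask (__ffs()) and masking the result, the same shape as the kernel's FIELD_PREP(). A standalone sketch of the pattern, using the compiler builtin __builtin_ctz() in place of __ffs():

#include <stdio.h>

/* Field masks for the LED status word; bit ranges as in the header. */
#define LED_STATUS_OFF_MASK      0xff000000u	/* bits 31:24 */
#define LED_STATUS_ON_MASK       0x00ff0000u	/* bits 23:16 */
#define LED_STATUS_DURATION_MASK 0x0000ff00u	/* bits 15:8  */

/* Shift the value to the field's lowest set bit, then clamp to the mask. */
static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	unsigned int reg = field_prep(LED_STATUS_DURATION_MASK, 0xff) |
			   field_prep(LED_STATUS_OFF_MASK, 0x10) |
			   field_prep(LED_STATUS_ON_MASK, 0x20);

	printf("0x%08x\n", reg);	/* 0x1020ff00 */
	return 0;
}
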
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 4598cb2cc3ff..a5413a309a0a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -177,7 +177,7 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (ret < 0)
return ret;
- if (pid && pid != MT_PACKET_ID_NO_ACK)
+ if (pid >= MT_PACKET_ID_FIRST)
qsel = MT_QSEL_MGMT;
*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 81970cf777c0..098d05e109e7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -87,8 +87,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
txwi->pktid = pid;
- if ((pid && pid != MT_PACKET_ID_NO_ACK) ||
- q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
+ if (pid >= MT_PACKET_ID_FIRST || q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
qsel = MT_QSEL_MGMT;
else
qsel = MT_QSEL_EDCA;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 38bd466cff16..062614ad0d51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -75,6 +75,58 @@ static const struct ieee80211_iface_combination mt76x02_if_comb[] = {
}
};
+static void
+mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
+ u8 delay_off)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev,
+ mt76);
+ u32 val;
+
+ val = MT_LED_STATUS_DURATION(0xff) |
+ MT_LED_STATUS_OFF(delay_off) |
+ MT_LED_STATUS_ON(delay_on);
+
+ mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
+ mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
+
+ val = MT_LED_CTRL_REPLAY(mdev->led_pin) |
+ MT_LED_CTRL_KICK(mdev->led_pin);
+ if (mdev->led_al)
+ val |= MT_LED_CTRL_POLARITY(mdev->led_pin);
+ mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int
+mt76x02_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt76x02_led_set_config(mdev, delta_on, delta_off);
+
+ return 0;
+}
+
+static void
+mt76x02_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
+ led_cdev);
+
+ if (!brightness)
+ mt76x02_led_set_config(mdev, 0, 0xff);
+ else
+ mt76x02_led_set_config(mdev, 0xff, 0);
+}
+
void mt76x02_init_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -93,6 +145,8 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
MT_DMA_HDR_LEN;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
} else {
+ INIT_DELAYED_WORK(&dev->wdt_work, mt76x02_wdt_work);
+
mt76x02_dfs_init_detector(dev);
wiphy->reg_notifier = mt76x02_regd_notifier;
@@ -106,7 +160,16 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
#endif
BIT(NL80211_IFTYPE_ADHOC);
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set =
+ mt76x02_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt76x02_led_set_blink;
+ }
}
hw->sta_data_size = sizeof(struct mt76x02_sta);
@@ -189,8 +252,6 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_AP)
set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
- ewma_signal_init(&msta->rssi);
-
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_add);
@@ -463,7 +524,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
return -EINVAL;
mutex_lock(&dev->mt76.mutex);
- mt76x02_mac_set_tx_protection(dev, val);
+ mt76x02_mac_set_rts_thresh(dev, val);
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -546,24 +607,6 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete);
-int mt76x02_get_txpower(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, int *dbm)
-{
- struct mt76x02_dev *dev = hw->priv;
- u8 nstreams = dev->mt76.chainmask & 0xf;
-
- *dbm = dev->mt76.txpower_cur / 2;
-
- /* convert from per-chain power to combined
- * output on 2x2 devices
- */
- if (nstreams > 1)
- *dbm += 3;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x02_get_txpower);
-
void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
bool ps)
{
@@ -661,6 +704,10 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
tasklet_enable(&dev->pre_tbtt_tasklet);
}
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt76x02_mac_set_tx_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
if (changed & BSS_CHANGED_BEACON_INT) {
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index 54a9b5fac787..f8534362e2c8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -143,6 +143,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_VHT_HT_FBK_CFG1, 0xedcba980 },
{ MT_PROT_AUTO_TX_CFG, 0x00830083 },
{ MT_HT_CTRL_CFG, 0x000001ff },
+ { MT_TX_LINK_CFG, 0x00001020 },
};
struct mt76_reg_pair prot_vals[] = {
{ MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
index acfa2b570c7c..40ef43926c06 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
@@ -26,29 +26,6 @@
#define MT_MCU_PCIE_REMAP_BASE2 0x0744
#define MT_MCU_PCIE_REMAP_BASE3 0x0748
-#define MT_LED_CTRL 0x0770
-#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
-#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
-#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
-#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
-
-#define MT_LED_TX_BLINK_0 0x0774
-#define MT_LED_TX_BLINK_1 0x0778
-
-#define MT_LED_S0_BASE 0x077C
-#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
-#define MT_LED_S1_BASE 0x0780
-#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
-#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
- MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
- MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
-#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
- MT_LED_STATUS_DURATION_MASK)
-
#define MT_MCU_ROM_PATCH_OFFSET 0x80000
#define MT_MCU_ROM_PATCH_ADDR 0x90000
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index b259e4b50f1e..28ec19acf9db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -53,7 +53,6 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x02_dev *dev);
void mt76x2_phy_power_on(struct mt76x02_dev *dev);
-int mt76x2_init_hardware(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);
int mt76x2_eeprom_init(struct mt76x02_dev *dev);
int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 7f4ea2d00f42..4347d5e7a915 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -151,6 +151,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
MT_CH_TIME_CFG_RX_AS_BUSY |
MT_CH_TIME_CFG_NAV_AS_BUSY |
MT_CH_TIME_CFG_EIFS_AS_BUSY |
+ MT_CH_CCA_RC_EN |
FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
mt76x02_set_tx_ackto(dev);
@@ -260,7 +261,7 @@ mt76x2_power_on(struct mt76x02_dev *dev)
mt76x2_power_on_rf(dev, 1);
}
-int mt76x2_init_hardware(struct mt76x02_dev *dev)
+static int mt76x2_init_hardware(struct mt76x02_dev *dev)
{
int ret;
@@ -300,6 +301,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->wdt_work);
mt76x02_mcu_set_radio_state(dev, false);
mt76x2_mac_stop(dev, false);
}
@@ -340,54 +342,6 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
return dev;
}
-static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
- u8 delay_off)
-{
- struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev,
- mt76);
- u32 val;
-
- val = MT_LED_STATUS_DURATION(0xff) |
- MT_LED_STATUS_OFF(delay_off) |
- MT_LED_STATUS_ON(delay_on);
-
- mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
- mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
-
- val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
- MT_LED_CTRL_KICK(mt76->led_pin);
- if (mt76->led_al)
- val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
- mt76_wr(dev, MT_LED_CTRL, val);
-}
-
-static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
- led_cdev);
- u8 delta_on, delta_off;
-
- delta_off = max_t(u8, *delay_off / 10, 1);
- delta_on = max_t(u8, *delay_on / 10, 1);
-
- mt76x2_led_set_config(mt76, delta_on, delta_off);
- return 0;
-}
-
-static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
- led_cdev);
-
- if (!brightness)
- mt76x2_led_set_config(mt76, 0, 0xff);
- else
- mt76x2_led_set_config(mt76, 0xff, 0);
-}
-
int mt76x2_register_device(struct mt76x02_dev *dev)
{
int ret;
@@ -402,12 +356,6 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
mt76x02_config_mac_addr_list(dev);
- /* init led callbacks */
- if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
- }
-
ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index b54a32397486..06a26a152ba4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -34,6 +34,8 @@ mt76x2_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
+ MT_WATCHDOG_TIME);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
@@ -189,7 +191,7 @@ const struct ieee80211_ops mt76x2_ops = {
.sw_scan_complete = mt76x02_sw_scan_complete,
.flush = mt76x2_flush,
.ampdu_action = mt76x02_ampdu_action,
- .get_txpower = mt76x02_get_txpower,
+ .get_txpower = mt76_get_txpower,
.wake_tx_queue = mt76_wake_tx_queue,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.release_buffered_frames = mt76_release_buffered_frames,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index da7cd40f56ff..65ed62229a5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -254,6 +254,8 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
0x38);
}
+ mt76x02_edcca_init(dev);
+
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index c9634a774705..e2ee5e498da7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -284,7 +284,9 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ if (!dev->cal.avg_rssi_all)
+ dev->cal.avg_rssi_all = -75;
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 2b48cc51a30d..286c7f451090 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -138,5 +138,5 @@ const struct ieee80211_ops mt76x2u_ops = {
.sw_scan_start = mt76x02_sw_scan,
.sw_scan_complete = mt76x02_sw_scan_complete,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
- .get_txpower = mt76x02_get_txpower,
+ .get_txpower = mt76_get_txpower,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
index 45a95ee3a415..152d41fe9ff5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -39,7 +39,7 @@ static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev)
{
struct mt76_usb *usb = &dev->mt76.usb;
- const u8 data[] = {
+ static const u8 data[] = {
0x6f, 0xfc, 0x08, 0x01,
0x20, 0x04, 0x00, 0x00,
0x00, 0x09, 0x00,
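
Marking the MCU patch-enable byte table static const lets it live once in read-only data instead of being rebuilt on the stack on every call. A short illustrative sketch (the helper name is hypothetical, not driver code):

#include <stddef.h>

/* With 'static const' the initializer is emitted once into .rodata;
 * without 'static' it would be copied onto the stack on each call. */
static const unsigned char mcu_patch_seq[] = {
	0x6f, 0xfc, 0x08, 0x01,
	0x20, 0x04, 0x00, 0x00,
	0x00, 0x09, 0x00,
};

static const unsigned char *get_patch_seq(size_t *len)
{
	*len = sizeof(mcu_patch_seq);
	return mcu_patch_seq;
}
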
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 7b711058807d..ef38e8626da9 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -170,21 +170,22 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
int pid;
if (!wcid)
- return 0;
+ return MT_PACKET_ID_NO_ACK;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
return MT_PACKET_ID_NO_ACK;
if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
- return 0;
+ return MT_PACKET_ID_NO_SKB;
spin_lock_bh(&dev->status_list.lock);
memset(cb, 0, sizeof(*cb));
wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
- if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK)
- wcid->packet_id = 1;
+ if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
+ wcid->packet_id == MT_PACKET_ID_NO_SKB)
+ wcid->packet_id = MT_PACKET_ID_FIRST;
pid = wcid->packet_id;
cb->wcid = wcid->idx;
@@ -330,7 +331,8 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
if (last)
- info->flags |= IEEE80211_TX_STATUS_EOSP;
+ info->flags |= IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
mt76_skb_set_moredata(skb, !last);
dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
@@ -394,6 +396,11 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
bool probe;
int idx;
+ if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
+ *empty = true;
+ return 0;
+ }
+
skb = mt76_txq_dequeue(dev, mtxq, false);
if (!skb) {
*empty = true;
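
The tx-status change above reserves distinct packet-id values for "no ACK expected" and "status requested but no skb tracked", and restarts the counter at MT_PACKET_ID_FIRST so a wrapping id can never collide with either sentinel. A runnable sketch of that allocation policy; the sentinel values below are assumed for illustration only, the driver defines its own MT_PACKET_ID_* constants:

#include <stdint.h>
#include <stdio.h>

#define PKT_ID_MASK   0xff
#define PKT_ID_NO_ACK 0x00
#define PKT_ID_NO_SKB 0x01
#define PKT_ID_FIRST  0x02

/* Advance the per-station packet id, skipping the reserved values. */
static uint8_t next_packet_id(uint8_t *last)
{
	*last = (*last + 1) & PKT_ID_MASK;
	if (*last == PKT_ID_NO_ACK || *last == PKT_ID_NO_SKB)
		*last = PKT_ID_FIRST;
	return *last;
}

int main(void)
{
	uint8_t id = 0xfe;
	int i;

	/* Wrapping past 0xff skips both reserved values. */
	for (i = 0; i < 4; i++)
		printf("pid=%u\n", next_packet_id(&id));
	return 0;
}
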
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index b061263453d4..6a2507524c6c 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -407,17 +407,15 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
if (len < 0)
return 0;
+ data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
+ if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
+ return 0;
+
skb = build_skb(data, q->buf_size);
if (!skb)
return 0;
- data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
skb_reserve(skb, MT_DMA_HDR_LEN);
- if (skb->tail + data_len > skb->end) {
- dev_kfree_skb(skb);
- return 1;
- }
-
__skb_put(skb, data_len);
len -= data_len;
@@ -585,6 +583,7 @@ static void mt76u_stop_rx(struct mt76_dev *dev)
static void mt76u_tx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue_entry entry;
struct mt76u_buf *buf;
struct mt76_queue *q;
bool wake;
@@ -599,17 +598,18 @@ static void mt76u_tx_tasklet(unsigned long data)
if (!buf->done || !q->queued)
break;
- dev->drv->tx_complete_skb(dev, q,
- &q->entry[q->head],
- false);
-
if (q->entry[q->head].schedule) {
q->entry[q->head].schedule = false;
q->swq_queued--;
}
+ entry = q->entry[q->head];
q->head = (q->head + 1) % q->ndesc;
q->queued--;
+
+ spin_unlock_bh(&q->lock);
+ dev->drv->tx_complete_skb(dev, q, &entry, false);
+ spin_lock_bh(&q->lock);
}
mt76_txq_schedule(dev, q);
wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
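
The tasklet now snapshots the queue entry, advances head/queued under the lock, and only then drops the lock before calling tx_complete_skb(), so the potentially slow completion path no longer runs with the queue lock held. A pthread-based analogue of the "copy under lock, complete outside" pattern; the queue layout is assumed, not the driver's:

#include <pthread.h>

struct entry { void *skb; };

struct queue {
	pthread_mutex_t lock;
	struct entry entries[64];
	int head, queued;
};

/* Slow completion work, runs without the queue lock. */
static void complete_entry(struct entry *e) { (void)e; }

static void drain_one(struct queue *q)
{
	struct entry e;

	pthread_mutex_lock(&q->lock);
	if (!q->queued) {
		pthread_mutex_unlock(&q->lock);
		return;
	}
	e = q->entries[q->head];          /* copy before releasing the slot */
	q->head = (q->head + 1) % 64;
	q->queued--;
	pthread_mutex_unlock(&q->lock);

	complete_entry(&e);               /* lock already dropped here */
}
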
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 0c35b8db58cd..69270c1a9091 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -75,4 +75,46 @@ int mt76_wcid_alloc(unsigned long *mask, int size)
}
EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev)
+{
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ unsigned long mask = dev->wcid_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ wcid = rcu_dereference(dev->wcid[j]);
+ if (!wcid)
+ continue;
+
+ spin_lock(&dev->rx_lock);
+ if (wcid->inactive_count++ < 5)
+ cur_rssi = -ewma_signal_read(&wcid->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi);
+
MODULE_LICENSE("Dual BSD/GPL");
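
The new mt76_get_min_avg_rssi() walks the wcid bitmap word by word, skipping empty words and visiting only the set bits while tracking the global bit index. A runnable userspace sketch of that walk, independent of the driver's data structures:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Same traversal as mt76_get_min_avg_rssi(): skip empty words, then
 * visit only the set bits of each word, tracking the bit index j. */
static void for_each_set_index(const unsigned long *mask, int nwords)
{
	int i, j;

	for (i = 0; i < nwords; i++) {
		unsigned long word = mask[i];

		if (!word)
			continue;
		for (j = i * BITS_PER_LONG; word; j++, word >>= 1) {
			if (!(word & 1))
				continue;
			printf("index %d is active\n", j);
		}
	}
}

int main(void)
{
	unsigned long mask[2] = { 0x5, 0x1 }; /* bits 0, 2 and BITS_PER_LONG */

	for_each_set_index(mask, 2);
	return 0;
}
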
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 7f3e3983b781..f7edeffb2b19 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -124,9 +124,9 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
u16 dma_len = get_unaligned_le16(data);
if (data_len < min_seg_len ||
- WARN_ON(!dma_len) ||
- WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
- WARN_ON(dma_len & 0x3))
+ WARN_ON_ONCE(!dma_len) ||
+ WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON_ONCE(dma_len & 0x3))
return 0;
return MT_DMA_HDRS + dma_len;
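
Switching the length sanity checks to WARN_ON_ONCE() keeps the checks and their return value, but prints the backtrace only for the first malformed RX segment, so a stream of bad DMA lengths cannot flood the log. A userspace analogue of "warn once, still evaluate every time" (the kernel macro keeps per-call-site state; this sketch shares one flag):

#include <stdbool.h>
#include <stdio.h>

/* Report a failed check, but log only the first occurrence. */
static bool warn_on_once(bool cond, const char *what)
{
	static bool warned;

	if (cond && !warned) {
		warned = true;
		fprintf(stderr, "warning: %s\n", what);
	}
	return cond;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (warn_on_once(1, "bad dma_len"))
			continue; /* every bad segment is still rejected */
	return 0;
}
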
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
index 662d12703b69..57b503ae63f1 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -17,7 +17,7 @@
struct mt7601u_dev;
-#define MT7601U_EE_MAX_VER 0x0c
+#define MT7601U_EE_MAX_VER 0x0d
#define MT7601U_EEPROM_SIZE 256
#define MT7601U_DEFAULT_TX_POWER 6
diff --git a/drivers/net/wireless/quantenna/Makefile b/drivers/net/wireless/quantenna/Makefile
index baebfbde119e..cea83d178d2e 100644
--- a/drivers/net/wireless/quantenna/Makefile
+++ b/drivers/net/wireless/quantenna/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2015-2016 Quantenna Communications, Inc.
# All rights reserved.
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 528ca7f5e070..14b569b6d1b5 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015 Quantenna Communications
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015 Quantenna Communications. All rights reserved. */
#ifndef QTNFMAC_BUS_H
#define QTNFMAC_BUS_H
@@ -135,7 +122,5 @@ static __always_inline void qtnf_bus_unlock(struct qtnf_bus *bus)
int qtnf_core_attach(struct qtnf_bus *bus);
void qtnf_core_detach(struct qtnf_bus *bus);
-void qtnf_txflowblock(struct device *dev, bool state);
-void qtnf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
#endif /* QTNFMAC_BUS_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 51b33ec78fac..45f4cef7de9c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2012-2012 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/kernel.h>
#include <linux/etherdevice.h>
@@ -122,7 +109,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
struct vif_params *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
- u8 *mac_addr;
+ u8 *mac_addr = NULL;
+ int use4addr = 0;
int ret;
ret = qtnf_validate_iface_combinations(wiphy, vif, type);
@@ -132,14 +120,14 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
return ret;
}
- if (params)
+ if (params) {
mac_addr = params->macaddr;
- else
- mac_addr = NULL;
+ use4addr = params->use_4addr;
+ }
qtnf_scan_done(vif->mac, true);
- ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr);
+ ret = qtnf_cmd_send_change_intf_type(vif, type, use4addr, mac_addr);
if (ret) {
pr_err("VIF%u.%u: failed to change type to %d\n",
vif->mac->macid, vif->vifid, type);
@@ -190,6 +178,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
struct qtnf_wmac *mac;
struct qtnf_vif *vif;
u8 *mac_addr = NULL;
+ int use4addr = 0;
int ret;
mac = wiphy_priv(wiphy);
@@ -225,10 +214,12 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-ENOTSUPP);
}
- if (params)
+ if (params) {
mac_addr = params->macaddr;
+ use4addr = params->use_4addr;
+ }
- ret = qtnf_cmd_send_add_intf(vif, type, mac_addr);
+ ret = qtnf_cmd_send_add_intf(vif, type, use4addr, mac_addr);
if (ret) {
pr_err("VIF%u.%u: failed to add VIF %pM\n",
mac->macid, vif->vifid, mac_addr);
@@ -359,11 +350,6 @@ static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed)
return -EFAULT;
}
- if (changed & (WIPHY_PARAM_RETRY_LONG | WIPHY_PARAM_RETRY_SHORT)) {
- pr_err("MAC%u: can't modify retry params\n", mac->macid);
- return -EOPNOTSUPP;
- }
-
ret = qtnf_cmd_send_update_phy_params(mac, changed);
if (ret)
pr_err("MAC%u: failed to update PHY params\n", mac->macid);
@@ -1107,7 +1093,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
- WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_4ADDR_STATION;
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index b73425122a10..c374857283ac 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_CFG80211_H_
#define _QTN_FMAC_CFG80211_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 659e7649fe22..0f48f541de41 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -1,17 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/types.h>
#include <linux/skbuff.h>
@@ -72,6 +60,8 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode)
return -EADDRINUSE;
case QLINK_CMD_RESULT_EADDRNOTAVAIL:
return -EADDRNOTAVAIL;
+ case QLINK_CMD_RESULT_EBUSY:
+ return -EBUSY;
default:
return -EFAULT;
}
@@ -97,14 +87,12 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
vif_id = cmd->vifid;
cmd->mhdr.len = cpu_to_le16(cmd_skb->len);
- pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
- le16_to_cpu(cmd->cmd_id));
+ pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, cmd_id);
if (bus->fw_state != QTNF_FW_STATE_ACTIVE &&
- le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) {
+ cmd_id != QLINK_CMD_FW_INIT) {
pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
- mac_id, vif_id, le16_to_cpu(cmd->cmd_id),
- bus->fw_state);
+ mac_id, vif_id, cmd_id, bus->fw_state);
dev_kfree_skb(cmd_skb);
return -ENODEV;
}
@@ -138,7 +126,7 @@ out:
return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
- mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret);
+ mac_id, vif_id, cmd_id, ret);
return ret;
}
@@ -732,6 +720,7 @@ out:
static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
enum nl80211_iftype iftype,
+ int use4addr,
u8 *mac_addr,
enum qlink_cmd_type cmd_type)
{
@@ -749,6 +738,7 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
qtnf_bus_lock(vif->mac->bus);
cmd = (struct qlink_cmd_manage_intf *)cmd_skb->data;
+ cmd->intf_info.use4addr = use4addr;
switch (iftype) {
case NL80211_IFTYPE_AP:
@@ -784,17 +774,19 @@ out:
return ret;
}
-int qtnf_cmd_send_add_intf(struct qtnf_vif *vif,
- enum nl80211_iftype iftype, u8 *mac_addr)
+int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype,
+ int use4addr, u8 *mac_addr)
{
- return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr,
+ return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
QLINK_CMD_ADD_INTF);
}
int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
- enum nl80211_iftype iftype, u8 *mac_addr)
+ enum nl80211_iftype iftype,
+ int use4addr,
+ u8 *mac_addr)
{
- return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr,
+ return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
QLINK_CMD_CHANGE_INTF);
}
@@ -914,9 +906,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
return -E2BIG;
- hwinfo->rd = kzalloc(sizeof(*hwinfo->rd)
- + sizeof(struct ieee80211_reg_rule)
- * resp->n_reg_rules, GFP_KERNEL);
+ hwinfo->rd = kzalloc(struct_size(hwinfo->rd, reg_rules,
+ resp->n_reg_rules), GFP_KERNEL);
if (!hwinfo->rd)
return -ENOMEM;
@@ -1558,11 +1549,11 @@ static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac,
switch (tlv_type) {
case QTN_TLV_ID_FRAG_THRESH:
phy_thr = (void *)tlv;
- mac_info->frag_thr = (u32)le16_to_cpu(phy_thr->thr);
+ mac_info->frag_thr = le32_to_cpu(phy_thr->thr);
break;
case QTN_TLV_ID_RTS_THRESH:
phy_thr = (void *)tlv;
- mac_info->rts_thr = (u32)le16_to_cpu(phy_thr->thr);
+ mac_info->rts_thr = le32_to_cpu(phy_thr->thr);
break;
case QTN_TLV_ID_SRETRY_LIMIT:
limit = (void *)tlv;
@@ -1810,15 +1801,23 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
qtnf_bus_lock(mac->bus);
if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
- qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_FRAG_THRESH,
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_FRAG_THRESH,
wiphy->frag_threshold);
if (changed & WIPHY_PARAM_RTS_THRESHOLD)
- qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_RTS_THRESH,
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_RTS_THRESH,
wiphy->rts_threshold);
if (changed & WIPHY_PARAM_COVERAGE_CLASS)
qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
wiphy->coverage_class);
+ if (changed & WIPHY_PARAM_RETRY_LONG)
+ qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT,
+ wiphy->retry_long);
+
+ if (changed & WIPHY_PARAM_RETRY_SHORT)
+ qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT,
+ wiphy->retry_short);
+
ret = qtnf_cmd_send(mac->bus, cmd_skb);
if (ret)
goto out;
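
The regulatory-rules allocation above now uses struct_size(), which computes sizeof(*rd) plus n trailing reg_rules elements with overflow checking instead of open-coded multiplication. A runnable sketch of the equivalent arithmetic for a hypothetical flexible-array struct (not the qlink types):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct rule { int lo, hi; };

struct ruleset {
	int n_rules;
	struct rule rules[];   /* flexible array member */
};

/* Minimal stand-in for the kernel's struct_size(): header size plus
 * n trailing elements, saturating to SIZE_MAX on overflow. */
static size_t ruleset_size(size_t n)
{
	size_t elems;

	if (__builtin_mul_overflow(n, sizeof(struct rule), &elems) ||
	    elems > SIZE_MAX - sizeof(struct ruleset))
		return SIZE_MAX;
	return sizeof(struct ruleset) + elems;
}

int main(void)
{
	struct ruleset *rs = calloc(1, ruleset_size(4));

	if (!rs)
		return 1;
	rs->n_rules = 4;
	printf("allocated %zu bytes\n", ruleset_size(4));
	free(rs);
	return 0;
}
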
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 1ac41156c192..96dff643bbc4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -1,17 +1,5 @@
-/*
- * Copyright (c) 2016 Quantenna Communications, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016 Quantenna Communications. All rights reserved. */
#ifndef QLINK_COMMANDS_H_
#define QLINK_COMMANDS_H_
@@ -26,9 +14,11 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus);
int qtnf_cmd_get_hw_info(struct qtnf_bus *bus);
int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac);
int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype,
- u8 *mac_addr);
+ int use4addr, u8 *mac_addr);
int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
- enum nl80211_iftype iftype, u8 *mac_addr);
+ enum nl80211_iftype iftype,
+ int use4addr,
+ u8 *mac_addr);
int qtnf_cmd_send_del_intf(struct qtnf_vif *vif);
int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
struct ieee80211_supported_band *band);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 5d18a4a917c9..ee1b75fda1dd 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -195,6 +182,7 @@ static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr)
qtnf_scan_done(vif->mac, true);
ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype,
+ vif->wdev.use_4addr,
sa->sa_data);
if (ret)
@@ -545,7 +533,8 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
- ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr);
+ ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype,
+ vif->wdev.use_4addr, vif->mac_addr);
if (ret) {
pr_err("MAC%u: failed to add VIF\n", macid);
goto error;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 293055049caa..a31cff46e964 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_CORE_H_
#define _QTN_FMAC_CORE_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.c b/drivers/net/wireless/quantenna/qtnfmac/debug.c
index 9f826b9ef5d9..598ece753a4b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/debug.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/debug.c
@@ -1,32 +1,11 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include "debug.h"
-#undef pr_fmt
-#define pr_fmt(fmt) "qtnfmac dbg: %s: " fmt, __func__
-
void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name)
{
bus->dbg_dir = debugfs_create_dir(name, NULL);
-
- if (IS_ERR_OR_NULL(bus->dbg_dir)) {
- pr_warn("failed to create debugfs root dir\n");
- bus->dbg_dir = NULL;
- }
}
void qtnf_debugfs_remove(struct qtnf_bus *bus)
@@ -38,9 +17,5 @@ void qtnf_debugfs_remove(struct qtnf_bus *bus)
void qtnf_debugfs_add_entry(struct qtnf_bus *bus, const char *name,
int (*fn)(struct seq_file *seq, void *data))
{
- struct dentry *entry;
-
- entry = debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn);
- if (IS_ERR_OR_NULL(entry))
- pr_warn("failed to add entry (%s)\n", name);
+ debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn);
}
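
Both debug.c hunks drop the IS_ERR_OR_NULL checks around debugfs_create_dir() and debugfs_create_devm_seqfile(); current debugfs usage is that callers ignore the return value, since the API copes with error pointers being passed back in, so the warnings were dead weight. A kernel-style sketch of the resulting shape, assuming <linux/debugfs.h> and <linux/seq_file.h> (names are illustrative, not the driver's):

/* Sketch only: no return-value checks on the debugfs_create_* calls. */
static struct dentry *dbg_dir;

static int demo_show(struct seq_file *s, void *data)
{
	seq_puts(s, "ok\n");
	return 0;
}

static void demo_debugfs_init(struct device *dev)
{
	dbg_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_devm_seqfile(dev, "state", dbg_dir, demo_show);
}
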
diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.h b/drivers/net/wireless/quantenna/qtnfmac/debug.h
index d6dd12b5d434..61b45536b83a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/debug.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/debug.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_DEBUG_H_
#define _QTN_FMAC_DEBUG_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 8b542b431b75..3fd1a9217737 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -158,6 +145,12 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
const struct qlink_event_bss_join *join_info,
u16 len)
{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ enum ieee80211_statuscode status = le16_to_cpu(join_info->status);
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_bss *bss = NULL;
+ u8 *ie = NULL;
+
if (unlikely(len < sizeof(*join_info))) {
pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
vif->mac->macid, vif->vifid, len,
@@ -171,15 +164,80 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
return -EPROTO;
}
- pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid,
- join_info->bssid);
+ pr_debug("VIF%u.%u: BSSID:%pM status:%u\n",
+ vif->mac->macid, vif->vifid, join_info->bssid, status);
+
+ if (status == WLAN_STATUS_SUCCESS) {
+ qlink_chandef_q2cfg(wiphy, &join_info->chan, &chandef);
+ if (!cfg80211_chandef_valid(&chandef)) {
+ pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+ vif->mac->macid, vif->vifid,
+ chandef.chan->center_freq,
+ chandef.center_freq1,
+ chandef.center_freq2,
+ chandef.width);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+ bss = cfg80211_get_bss(wiphy, chandef.chan, join_info->bssid,
+ NULL, 0, IEEE80211_BSS_TYPE_ESS,
+ IEEE80211_PRIVACY_ANY);
+ if (!bss) {
+ pr_warn("VIF%u.%u: add missing BSS:%pM chan:%u\n",
+ vif->mac->macid, vif->vifid,
+ join_info->bssid, chandef.chan->hw_value);
+
+ if (!vif->wdev.ssid_len) {
+ pr_warn("VIF%u.%u: SSID unknown for BSS:%pM\n",
+ vif->mac->macid, vif->vifid,
+ join_info->bssid);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+
+ ie = kzalloc(2 + vif->wdev.ssid_len, GFP_KERNEL);
+ if (!ie) {
+ pr_warn("VIF%u.%u: IE alloc failed for BSS:%pM\n",
+ vif->mac->macid, vif->vifid,
+ join_info->bssid);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+
+ ie[0] = WLAN_EID_SSID;
+ ie[1] = vif->wdev.ssid_len;
+ memcpy(ie + 2, vif->wdev.ssid, vif->wdev.ssid_len);
+
+ bss = cfg80211_inform_bss(wiphy, chandef.chan,
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ join_info->bssid, 0,
+ WLAN_CAPABILITY_ESS, 100,
+ ie, 2 + vif->wdev.ssid_len,
+ 0, GFP_KERNEL);
+ if (!bss) {
+ pr_warn("VIF%u.%u: can't connect to unknown BSS: %pM\n",
+ vif->mac->macid, vif->vifid,
+ join_info->bssid);
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ goto done;
+ }
+ }
+ }
+
+done:
cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL,
- 0, le16_to_cpu(join_info->status), GFP_KERNEL);
+ 0, status, GFP_KERNEL);
+ if (bss) {
+ if (!ether_addr_equal(vif->bssid, join_info->bssid))
+ ether_addr_copy(vif->bssid, join_info->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ }
- if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS)
+ if (status == WLAN_STATUS_SUCCESS)
netif_carrier_on(vif->netdev);
+ kfree(ie);
return 0;
}
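
The expanded BSS-join handler validates the reported channel and, when cfg80211 has never seen the BSS, synthesizes a minimal SSID information element (tag, length, payload) so cfg80211_inform_bss() can create the entry before the connect result is reported. A small runnable sketch of just the IE-construction step, with names chosen for illustration:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define WLAN_EID_SSID 0 /* element ID 0 is the SSID IE */

/* Build a bare SSID information element: 1 byte tag, 1 byte length,
 * then the SSID bytes. Caller frees the buffer. */
static uint8_t *build_ssid_ie(const uint8_t *ssid, size_t ssid_len,
			      size_t *ie_len)
{
	uint8_t *ie;

	if (ssid_len > 32)          /* 802.11 SSIDs are at most 32 bytes */
		return NULL;

	ie = calloc(1, 2 + ssid_len);
	if (!ie)
		return NULL;

	ie[0] = WLAN_EID_SSID;
	ie[1] = (uint8_t)ssid_len;
	memcpy(ie + 2, ssid, ssid_len);
	*ie_len = 2 + ssid_len;
	return ie;
}
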
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.h b/drivers/net/wireless/quantenna/qtnfmac/event.h
index ae759b602c2a..533ad99d045d 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_EVENT_H_
#define _QTN_FMAC_EVENT_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 598edb814421..cbcda57105f3 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -559,6 +559,9 @@ static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
return IRQ_NONE;
+ if (!priv->msi_enabled)
+ qtnf_deassert_intx(ts);
+
priv->pcie_irq_count++;
qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
@@ -571,9 +574,6 @@ static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
tasklet_hi_schedule(&priv->reclaim_tq);
- if (!priv->msi_enabled)
- qtnf_deassert_intx(ts);
-
return IRQ_HANDLED;
}
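
The topaz interrupt handler now deasserts the legacy INTx line before draining the IPC, rx and reclaim work, so an event arriving while the handler runs re-raises the level interrupt instead of being lost. A compact sketch of that ordering; the names are illustrative, not the driver's:

#include <stdbool.h>

struct dev_ctx { bool intx_asserted; int pending; };

static void ack_interrupt(struct dev_ctx *c)       { c->intx_asserted = false; }
static bool fetch_pending_event(struct dev_ctx *c) { return c->pending > 0; }
static void process_event(struct dev_ctx *c)       { c->pending--; }

/* Deassert the level interrupt first, then drain work, so anything
 * that arrives mid-handler triggers the line again. */
static void handle_irq(struct dev_ctx *c)
{
	ack_interrupt(c);
	while (fetch_pending_event(c))
		process_event(c);
}
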
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 8d62addea895..27fdb5b01ee3 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -1,25 +1,12 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_QLINK_H_
#define _QTN_QLINK_H_
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 11
+#define QLINK_PROTO_VER 13
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -105,7 +92,8 @@ struct qlink_intf_info {
__le16 if_type;
__le16 vlanid;
u8 mac_addr[ETH_ALEN];
- u8 rsvd[2];
+ u8 use4addr;
+ u8 rsvd[1];
} __packed;
enum qlink_sta_flags {
@@ -733,6 +721,7 @@ enum qlink_cmd_result {
QLINK_CMD_RESULT_EALREADY,
QLINK_CMD_RESULT_EADDRINUSE,
QLINK_CMD_RESULT_EADDRNOTAVAIL,
+ QLINK_CMD_RESULT_EBUSY,
};
/**
@@ -986,11 +975,13 @@ struct qlink_event_sta_deauth {
/**
* struct qlink_event_bss_join - data for QLINK_EVENT_BSS_JOIN event
*
+ * @chan: new operating channel definition
* @bssid: BSSID of a BSS which interface tried to joined.
* @status: status of joining attempt, see &enum ieee80211_statuscode.
*/
struct qlink_event_bss_join {
struct qlink_event ehdr;
+ struct qlink_chandef chan;
u8 bssid[ETH_ALEN];
__le16 status;
} __packed;
@@ -1182,7 +1173,7 @@ struct qlink_iface_limit_record {
struct qlink_tlv_frag_rts_thr {
struct qlink_tlv_hdr hdr;
- __le16 thr;
+ __le32 thr;
} __packed;
struct qlink_tlv_rlimit {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index aeeda81b09ea..72bfd17cb687 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -1,17 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/nl80211.h>
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 960d5d97492f..781ea7fe79f2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_QLINK_UTIL_H_
#define _QTN_FMAC_QLINK_UTIL_H_
@@ -69,6 +56,17 @@ static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb,
memcpy(hdr->val, &tmp, sizeof(tmp));
}
+static inline void qtnf_cmd_skb_put_tlv_u32(struct sk_buff *skb,
+ u16 tlv_id, u32 value)
+{
+ struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
+ __le32 tmp = cpu_to_le32(value);
+
+ hdr->type = cpu_to_le16(tlv_id);
+ hdr->len = cpu_to_le16(sizeof(value));
+ memcpy(hdr->val, &tmp, sizeof(tmp));
+}
+
u16 qlink_iface_type_to_nl_mask(u16 qlink_type);
u8 qlink_chan_width_mask_to_nl(u16 qlink_mask);
void qlink_chandef_q2cfg(struct wiphy *wiphy,
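
With the fragmentation and RTS threshold TLVs widened from __le16 to __le32, commands.c switches to the new qtnf_cmd_skb_put_tlv_u32() helper shown above: the value is converted to little-endian once and copied behind a generic type/length header. A runnable userspace sketch of the same TLV packing with the byte order handled explicitly (the TLV id below is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Append a type/length/value record with a 32-bit little-endian value,
 * mirroring the shape of qtnf_cmd_skb_put_tlv_u32(). */
static size_t put_tlv_u32(uint8_t *buf, uint16_t type, uint32_t value)
{
	buf[0] = type & 0xff;
	buf[1] = type >> 8;
	buf[2] = 4;              /* payload length, little-endian */
	buf[3] = 0;
	for (int i = 0; i < 4; i++)
		buf[4 + i] = (value >> (8 * i)) & 0xff;
	return 8;
}

int main(void)
{
	uint8_t buf[8];
	size_t n = put_tlv_u32(buf, 0x12, 2346); /* e.g. a frag threshold */

	for (size_t i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}
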
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
index 40295a511224..82d879950b62 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_HW_IDS_H_
#define _QTN_HW_IDS_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
index 2ec334199c2b..ff678951d3b2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/types.h>
#include <linux/io.h>
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
index c2a3702a9ee7..52cac5439b03 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_SHM_IPC_H_
#define _QTN_FMAC_SHM_IPC_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h
index 95a5f89a8b1a..78be70df1218 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_SHM_IPC_DEFS_H_
#define _QTN_FMAC_SHM_IPC_DEFS_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.c b/drivers/net/wireless/quantenna/qtnfmac/trans.c
index 345f34ec9750..95356e280e23 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/trans.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/trans.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include <linux/types.h>
#include <linux/export.h>
diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.h b/drivers/net/wireless/quantenna/qtnfmac/trans.h
index 9a473e07af0f..c0b76f871b31 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/trans.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/trans.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#ifndef _QTN_FMAC_TRANS_H_
#define _QTN_FMAC_TRANS_H_
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
index 3bc96b264769..cda6f5f3f38a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */
#include "util.h"
#include "qtn_hw_ids.h"
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h
index b8744baac332..a14b7078a9c7 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015 Quantenna Communications
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015 Quantenna Communications. All rights reserved. */
#ifndef QTNFMAC_UTIL_H
#define QTNFMAC_UTIL_H
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 0e95555aec62..7f813f6f8792 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5477,7 +5477,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002);
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F);
- rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x06060606);
+ rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0);
rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C666C);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index 61ba573e8bf1..05a2e8da412c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -656,36 +656,24 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf->driver_folder =
debugfs_create_dir(intf->rt2x00dev->ops->name,
rt2x00dev->hw->wiphy->debugfsdir);
- if (IS_ERR(intf->driver_folder) || !intf->driver_folder)
- goto exit;
intf->driver_entry =
rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob);
- if (IS_ERR(intf->driver_entry) || !intf->driver_entry)
- goto exit;
intf->chipset_entry =
rt2x00debug_create_file_chipset("chipset",
intf, &intf->chipset_blob);
- if (IS_ERR(intf->chipset_entry) || !intf->chipset_entry)
- goto exit;
intf->dev_flags = debugfs_create_file("dev_flags", 0400,
intf->driver_folder, intf,
&rt2x00debug_fop_dev_flags);
- if (IS_ERR(intf->dev_flags) || !intf->dev_flags)
- goto exit;
intf->cap_flags = debugfs_create_file("cap_flags", 0400,
intf->driver_folder, intf,
&rt2x00debug_fop_cap_flags);
- if (IS_ERR(intf->cap_flags) || !intf->cap_flags)
- goto exit;
intf->register_folder =
debugfs_create_dir("register", intf->driver_folder);
- if (IS_ERR(intf->register_folder) || !intf->register_folder)
- goto exit;
#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \
({ \
@@ -695,9 +683,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
0600, \
(__intf)->register_folder, \
&(__intf)->offset_##__name); \
- if (IS_ERR((__intf)->__name##_off_entry) || \
- !(__intf)->__name##_off_entry) \
- goto exit; \
\
(__intf)->__name##_val_entry = \
debugfs_create_file(__stringify(__name) "_value", \
@@ -705,9 +690,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
(__intf)->register_folder, \
(__intf), \
&rt2x00debug_fop_##__name); \
- if (IS_ERR((__intf)->__name##_val_entry) || \
- !(__intf)->__name##_val_entry) \
- goto exit; \
} \
})
@@ -721,15 +703,10 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
intf->queue_folder =
debugfs_create_dir("queue", intf->driver_folder);
- if (IS_ERR(intf->queue_folder) || !intf->queue_folder)
- goto exit;
intf->queue_frame_dump_entry =
debugfs_create_file("dump", 0400, intf->queue_folder,
intf, &rt2x00debug_fop_queue_dump);
- if (IS_ERR(intf->queue_frame_dump_entry)
- || !intf->queue_frame_dump_entry)
- goto exit;
skb_queue_head_init(&intf->frame_dump_skbqueue);
init_waitqueue_head(&intf->frame_dump_waitqueue);
@@ -747,10 +724,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
#endif
return;
-
-exit:
- rt2x00debug_deregister(rt2x00dev);
- rt2x00_err(rt2x00dev, "Failed to register debug handler\n");
}
void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 4c5de8fc8f12..52b9fc480f8b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -321,97 +321,12 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key)
{
- struct hw_key_entry key_entry;
- struct rt2x00_field32 field;
- u32 mask;
- u32 reg;
-
- if (crypto->cmd == SET_KEY) {
- /*
- * rt2x00lib can't determine the correct free
- * key_idx for shared keys. We have 1 register
- * with key valid bits. The goal is simple, read
- * the register, if that is full we have no slots
- * left.
- * Note that each BSS is allowed to have up to 4
- * shared keys, so put a mask over the allowed
- * entries.
- */
- mask = (0xf << crypto->bssidx);
-
- reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0);
- reg &= mask;
-
- if (reg && reg == mask)
- return -ENOSPC;
-
- key->hw_key_idx += reg ? ffz(reg) : 0;
-
- /*
- * Upload key to hardware
- */
- memcpy(key_entry.key, crypto->key,
- sizeof(key_entry.key));
- memcpy(key_entry.tx_mic, crypto->tx_mic,
- sizeof(key_entry.tx_mic));
- memcpy(key_entry.rx_mic, crypto->rx_mic,
- sizeof(key_entry.rx_mic));
-
- reg = SHARED_KEY_ENTRY(key->hw_key_idx);
- rt2x00mmio_register_multiwrite(rt2x00dev, reg,
- &key_entry, sizeof(key_entry));
-
- /*
- * The cipher types are stored over 2 registers.
- * bssidx 0 and 1 keys are stored in SEC_CSR1 and
- * bssidx 1 and 2 keys are stored in SEC_CSR5.
- * Using the correct defines correctly will cause overhead,
- * so just calculate the correct offset.
- */
- if (key->hw_key_idx < 8) {
- field.bit_offset = (3 * key->hw_key_idx);
- field.bit_mask = 0x7 << field.bit_offset;
-
- reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR1);
- rt2x00_set_field32(&reg, field, crypto->cipher);
- rt2x00mmio_register_write(rt2x00dev, SEC_CSR1, reg);
- } else {
- field.bit_offset = (3 * (key->hw_key_idx - 8));
- field.bit_mask = 0x7 << field.bit_offset;
-
- reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR5);
- rt2x00_set_field32(&reg, field, crypto->cipher);
- rt2x00mmio_register_write(rt2x00dev, SEC_CSR5, reg);
- }
-
- /*
- * The driver does not support the IV/EIV generation
- * in hardware. However it doesn't support the IV/EIV
- * inside the ieee80211 frame either, but requires it
- * to be provided separately for the descriptor.
- * rt2x00lib will cut the IV/EIV data out of all frames
- * given to us by mac80211, but we must tell mac80211
- * to generate the IV/EIV data.
- */
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- }
-
/*
- * SEC_CSR0 contains only single-bit fields to indicate
- * a particular key is valid. Because using the FIELD32()
- * defines directly will cause a lot of overhead, we use
- * a calculation to determine the correct bit directly.
+ * Let the software handle the shared keys,
+ * since the hardware decryption does not work reliably,
+ * because the firmware does not know the key's keyidx.
*/
- mask = 1 << key->hw_key_idx;
-
- reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0);
- if (crypto->cmd == SET_KEY)
- reg |= mask;
- else if (crypto->cmd == DISABLE_KEY)
- reg &= ~mask;
- rt2x00mmio_register_write(rt2x00dev, SEC_CSR0, reg);
-
- return 0;
+ return -EOPNOTSUPP;
}
static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
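
The shared-key hardware offload is removed because the firmware cannot learn the key index; returning -EOPNOTSUPP makes the stack fall back to software encryption for those keys. rt2x00 routes keys through its own rt2x00lib crypto hooks rather than implementing mac80211's set_key directly, so the kernel-style sketch below (assuming <net/mac80211.h>) only illustrates the general contract, with an illustrative callback name:

/* Sketch only: refusing a key from set_key tells mac80211 to handle
 * that key in software instead of in hardware. */
static int demo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			struct ieee80211_key_conf *key)
{
	/* Group (shared) keys: no reliable hardware support, punt to software. */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* Pairwise keys would be programmed into hardware here. */
	return 0;
}
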
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 33ad87528d9a..44a943d18b84 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -959,7 +959,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
if (proto == htons(ETH_P_AARP) || proto == htons(ETH_P_IPX)) {
/* This is the selective translation table, only 2 entries */
writeb(0xf8,
- &((struct snaphdr_t __iomem *)ptx->var)->org[3]);
+ &((struct snaphdr_t __iomem *)ptx->var)->org[2]);
}
/* Copy body of ethernet packet without ethernet header */
memcpy_toio((void __iomem *)&ptx->var +
@@ -2211,7 +2211,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
untranslate(local, skb, total_len);
}
} else { /* sniffer mode, so just pass whole packet */
- };
+ }
/************************/
/* Now pick up the rest of the fragments if any */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
index 2966681efaef..5d6b06d3c02c 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
@@ -2,4 +2,4 @@ rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
obj-$(CONFIG_RTL8180) += rtl818x_pci.o
-ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
+ccflags-y += -I $(srctree)/$(src)/..
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 225c1c8851cc..e2b1bfbcfbd4 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -803,7 +803,7 @@ static void rtl8180_config_cardbus(struct ieee80211_hw *dev)
rtl818x_iowrite16(priv, FEMR_SE, 0xffff);
} else {
reg16 = rtl818x_ioread16(priv, &priv->map->FEMR);
- reg16 |= (1 << 15) | (1 << 14) | (1 << 4);
+ reg16 |= (1 << 15) | (1 << 14) | (1 << 4);
rtl818x_iowrite16(priv, &priv->map->FEMR, reg16);
}
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
index ff074912a095..95bac73ece7c 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
@@ -2,4 +2,4 @@ rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o
obj-$(CONFIG_RTL8187) += rtl8187.o
-ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
+ccflags-y += -I $(srctree)/$(src)/..
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ef9b502ce576..7aa68fe5d791 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2172,8 +2172,6 @@ label_lps_done:
;
}
- rtlpriv->link_info.num_rx_inperiod = 0;
- rtlpriv->link_info.num_tx_inperiod = 0;
for (tid = 0; tid <= 7; tid++)
rtlpriv->link_info.tidtx_inperiod[tid] = 0;
@@ -2236,6 +2234,8 @@ label_lps_done:
rtlpriv->btcoexist.btc_info.in_4way = false;
}
+ rtlpriv->link_info.num_rx_inperiod = 0;
+ rtlpriv->link_info.num_tx_inperiod = 0;
rtlpriv->link_info.bcn_rx_inperiod = 0;
/* <6> scan list */
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 4bf7967590ca..ce23339bf9fb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1957,5 +1957,7 @@ void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igvalue)
dm_digtable->bt30_cur_igi = 0x32;
dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+ dm_digtable->pre_cck_fa_state = 0;
+ dm_digtable->cur_cck_fa_state = 0;
}
EXPORT_SYMBOL(rtl_dm_diginit);
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index d70385be9976..8186650efc56 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -463,12 +463,9 @@ static const struct file_operations file_ops_common_write = {
#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \
do { \
rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \
- if (!debugfs_create_file(#name, mode, \
- parent, &rtl_debug_priv_ ##name, \
- &file_ops_ ##fopname)) \
- pr_err("Unable to initialize debugfs:%s/%s\n", \
- rtlpriv->dbg.debugfs_name, \
- #name); \
+ debugfs_create_file(#name, mode, parent, \
+ &rtl_debug_priv_ ##name, \
+ &file_ops_ ##fopname); \
} while (0)
#define RTL_DEBUGFS_ADD(name) \
@@ -486,11 +483,6 @@ void rtl_debug_add_one(struct ieee80211_hw *hw)
rtlpriv->dbg.debugfs_dir =
debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir);
- if (!rtlpriv->dbg.debugfs_dir) {
- pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name,
- rtlpriv->dbg.debugfs_name);
- return;
- }
parent = rtlpriv->dbg.debugfs_dir;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index 42a6fba90ba9..acfd54c6f8dd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -151,8 +151,14 @@ static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
long rssi_val_min = 0;
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION &&
+ rtlpriv->link_info.bcn_rx_inperiod == 0)
+ return 0;
+
if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
(dm_digtable->cursta_cstate == DIG_STA_CONNECT)) {
if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
@@ -417,6 +423,8 @@ static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
} else {
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
+ dm_digtable->pre_cck_fa_state = 0;
+ dm_digtable->cur_cck_fa_state = 0;
}
dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
@@ -665,7 +673,7 @@ void rtl8723e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rate_adaptive *p_ra = &(rtlpriv->ra);
+ struct rate_adaptive *p_ra = &rtlpriv->ra;
p_ra->ratr_state = DM_RATR_STA_INIT;
p_ra->pre_ratr_state = DM_RATR_STA_INIT;
@@ -677,6 +685,89 @@ void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
}
+void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rate_adaptive *p_ra = &rtlpriv->ra;
+ u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
+ struct ieee80211_sta *sta = NULL;
+
+ if (is_hal_stop(rtlhal)) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ " driver is going to unload\n");
+ return;
+ }
+
+ if (!rtlpriv->dm.useramask) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ " driver does not control rate adaptive mask\n");
+ return;
+ }
+
+ if (mac->link_state == MAC80211_LINKED &&
+ mac->opmode == NL80211_IFTYPE_STATION) {
+ switch (p_ra->pre_ratr_state) {
+ case DM_RATR_STA_HIGH:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 20;
+ break;
+ case DM_RATR_STA_MIDDLE:
+ high_rssithresh_for_ra = 55;
+ low_rssithresh_for_ra = 20;
+ break;
+ case DM_RATR_STA_LOW:
+ high_rssithresh_for_ra = 60;
+ low_rssithresh_for_ra = 25;
+ break;
+ default:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 20;
+ break;
+ }
+
+ if (rtlpriv->link_info.bcn_rx_inperiod == 0)
+ switch (p_ra->pre_ratr_state) {
+ case DM_RATR_STA_HIGH:
+ default:
+ p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+ break;
+ case DM_RATR_STA_MIDDLE:
+ case DM_RATR_STA_LOW:
+ p_ra->ratr_state = DM_RATR_STA_LOW;
+ break;
+ }
+ else if (rtlpriv->dm.undec_sm_pwdb > high_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_HIGH;
+ else if (rtlpriv->dm.undec_sm_pwdb > low_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+ else
+ p_ra->ratr_state = DM_RATR_STA_LOW;
+
+ if (p_ra->pre_ratr_state != p_ra->ratr_state) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
+
+ rcu_read_lock();
+ sta = rtl_find_sta(hw, mac->bssid);
+ if (sta)
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
+ p_ra->ratr_state,
+ true);
+ rcu_read_unlock();
+
+ p_ra->pre_ratr_state = p_ra->ratr_state;
+ }
+ }
+}
+
void rtl8723e_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -826,7 +917,7 @@ void rtl8723e_dm_watchdog(struct ieee80211_hw *hw)
rtl8723e_dm_dynamic_bb_powersaving(hw);
rtl8723e_dm_dynamic_txpower(hw);
rtl8723e_dm_check_txpower_tracking(hw);
- /* rtl92c_dm_refresh_rate_adaptive_mask(hw); */
+ rtl8723e_dm_refresh_rate_adaptive_mask(hw);
rtl8723e_dm_bt_coexist(hw);
rtl8723e_dm_check_edca_turbo(hw);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 07b82700d1de..3103151dda2b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -266,8 +266,8 @@ static struct rtl_hal_ops rtl8723e_hal_ops = {
static struct rtl_mod_params rtl8723e_mod_params = {
.sw_crypto = false,
.inactiveps = true,
- .swctrl_lps = false,
- .fwctrl_lps = true,
+ .swctrl_lps = true,
+ .fwctrl_lps = false,
.aspm_support = 1,
.debug_level = 0,
.debug_mask = 0,
@@ -395,8 +395,8 @@ module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 1263b12db5dc..11f94b1a436f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -332,7 +332,7 @@ static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
@@ -374,7 +374,7 @@ static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
"Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
- };
+ }
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
@@ -694,7 +694,7 @@ static u8 _rtl8723be_get_rate_section_index(u32 regaddr)
else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
index = (u8)((regaddr - 0xE20) / 4);
break;
- };
+ }
return index;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
index 8c6ca8e689e4..d0c35f3e2012 100644
--- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -297,11 +297,6 @@ int rsi_init_dbgfs(struct rsi_hw *adapter)
dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
- if (!dev_dbgfs->subdir) {
- kfree(dev_dbgfs);
- return -ENOMEM;
- }
-
for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
files = &dev_debugfs_files[ii];
dev_dbgfs->rsi_files[ii] =
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 182b06629371..1dbaab2a96b7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -100,6 +100,9 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
mgmt_desc->frame_type = TX_DOT11_MGMT;
mgmt_desc->header_len = MIN_802_11_HDR_LEN;
mgmt_desc->xtend_desc_size = header_size - FRAME_DESC_SZ;
+
+ if (ieee80211_is_probe_req(wh->frame_control))
+ mgmt_desc->frame_info = cpu_to_le16(RSI_INSERT_SEQ_IN_FW);
mgmt_desc->frame_info |= cpu_to_le16(RATE_INFO_ENABLE);
if (is_broadcast_ether_addr(wh->addr1))
mgmt_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index e56fc83faf0e..aded1ae4fad5 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -229,6 +229,68 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
/* sbands->ht_cap.mcs.rx_highest = 0x82; */
}
+static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct cfg80211_scan_request *scan_req = &hw_req->req;
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
+
+ rsi_dbg(INFO_ZONE, "***** Hardware scan start *****\n");
+
+ if (common->fsm_state != FSM_MAC_INIT_DONE)
+ return -ENODEV;
+
+ if ((common->wow_flags & RSI_WOW_ENABLED) ||
+ scan_req->n_channels == 0)
+ return -EINVAL;
+
+ /* Scan already in progress. So return */
+ if (common->bgscan_en)
+ return -EBUSY;
+
+ /* If STA is not connected, return with special value 1, in order
+ * to start sw_scan in mac80211
+ */
+ if (!bss->assoc)
+ return 1;
+
+ mutex_lock(&common->mutex);
+ common->hwscan = scan_req;
+ if (!rsi_send_bgscan_params(common, RSI_START_BGSCAN)) {
+ if (!rsi_send_bgscan_probe_req(common, vif)) {
+ rsi_dbg(INFO_ZONE, "Background scan started...\n");
+ common->bgscan_en = true;
+ }
+ }
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+static void rsi_mac80211_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct cfg80211_scan_info info;
+
+ rsi_dbg(INFO_ZONE, "***** Hardware scan stop *****\n");
+ mutex_lock(&common->mutex);
+
+ if (common->bgscan_en) {
+ if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN))
+ common->bgscan_en = false;
+ info.aborted = false;
+ ieee80211_scan_completed(adapter->hw, &info);
+ rsi_dbg(INFO_ZONE, "Back ground scan cancelled\b\n");
+ }
+ common->hwscan = NULL;
+ mutex_unlock(&common->mutex);
+}
+
/**
* rsi_mac80211_detach() - This function is used to de-initialize the
* Mac80211 stack.
@@ -1917,6 +1979,8 @@ static const struct ieee80211_ops mac80211_ops = {
.suspend = rsi_mac80211_suspend,
.resume = rsi_mac80211_resume,
#endif
+ .hw_scan = rsi_mac80211_hw_scan_start,
+ .cancel_hw_scan = rsi_mac80211_cancel_hw_scan,
};
/**
@@ -1999,6 +2063,9 @@ int rsi_mac80211_attach(struct rsi_common *common)
common->max_stations = wiphy->max_ap_assoc_sta;
rsi_dbg(ERR_ZONE, "Max Stations Allowed = %d\n", common->max_stations);
hw->sta_data_size = sizeof(struct rsi_sta);
+
+ wiphy->max_scan_ssids = RSI_MAX_SCAN_SSIDS;
+ wiphy->max_scan_ie_len = RSI_MAX_SCAN_IE_LEN;
wiphy->flags = WIPHY_FLAG_REPORTS_OBSS;
wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 01d99ed985ee..ca3a55ed72e4 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -328,6 +328,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
}
rsi_default_ps_params(adapter);
+ init_bgscan_params(common);
spin_lock_init(&adapter->ps_lock);
timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
init_completion(&common->wlan_init_completion);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 1095df7d9573..404241424a62 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -15,6 +15,7 @@
*/
#include <linux/etherdevice.h>
+#include <linux/timer.h>
#include "rsi_mgmt.h"
#include "rsi_common.h"
#include "rsi_ps.h"
@@ -236,6 +237,18 @@ static void rsi_set_default_parameters(struct rsi_common *common)
common->dtim_cnt = RSI_DTIM_COUNT;
}
+void init_bgscan_params(struct rsi_common *common)
+{
+ memset((u8 *)&common->bgscan, 0, sizeof(struct rsi_bgscan_params));
+ common->bgscan.bgscan_threshold = RSI_DEF_BGSCAN_THRLD;
+ common->bgscan.roam_threshold = RSI_DEF_ROAM_THRLD;
+ common->bgscan.bgscan_periodicity = RSI_BGSCAN_PERIODICITY;
+ common->bgscan.num_bgscan_channels = 0;
+ common->bgscan.two_probe = 1;
+ common->bgscan.active_scan_duration = RSI_ACTIVE_SCAN_TIME;
+ common->bgscan.passive_scan_duration = RSI_PASSIVE_SCAN_TIME;
+}
+
/**
* rsi_set_contention_vals() - This function sets the contention values for the
* backoff procedure.
@@ -1628,6 +1641,107 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
}
#endif
+int rsi_send_bgscan_params(struct rsi_common *common, int enable)
+{
+ struct rsi_bgscan_params *params = &common->bgscan;
+ struct cfg80211_scan_request *scan_req = common->hwscan;
+ struct rsi_bgscan_config *bgscan;
+ struct sk_buff *skb;
+ u16 frame_len = sizeof(*bgscan);
+ u8 i;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending bgscan params frame\n", __func__);
+
+ skb = dev_alloc_skb(frame_len);
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, frame_len);
+
+ bgscan = (struct rsi_bgscan_config *)skb->data;
+ rsi_set_len_qno(&bgscan->desc_dword0.len_qno,
+ (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q);
+ bgscan->desc_dword0.frame_type = BG_SCAN_PARAMS;
+ bgscan->bgscan_threshold = cpu_to_le16(params->bgscan_threshold);
+ bgscan->roam_threshold = cpu_to_le16(params->roam_threshold);
+ if (enable)
+ bgscan->bgscan_periodicity =
+ cpu_to_le16(params->bgscan_periodicity);
+ bgscan->active_scan_duration =
+ cpu_to_le16(params->active_scan_duration);
+ bgscan->passive_scan_duration =
+ cpu_to_le16(params->passive_scan_duration);
+ bgscan->two_probe = params->two_probe;
+
+ bgscan->num_bgscan_channels = scan_req->n_channels;
+ for (i = 0; i < bgscan->num_bgscan_channels; i++)
+ bgscan->channels2scan[i] =
+ cpu_to_le16(scan_req->channels[i]->hw_value);
+
+ skb_put(skb, frame_len);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/* This function sends the probe request to be used by firmware in
+ * background scan
+ */
+int rsi_send_bgscan_probe_req(struct rsi_common *common,
+ struct ieee80211_vif *vif)
+{
+ struct cfg80211_scan_request *scan_req = common->hwscan;
+ struct rsi_bgscan_probe *bgscan;
+ struct sk_buff *skb;
+ struct sk_buff *probereq_skb;
+ u16 frame_len = sizeof(*bgscan);
+ size_t ssid_len = 0;
+ u8 *ssid = NULL;
+
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Sending bgscan probe req frame\n", __func__);
+
+ if (common->priv->sc_nvifs <= 0)
+ return -ENODEV;
+
+ if (scan_req->n_ssids) {
+ ssid = scan_req->ssids[0].ssid;
+ ssid_len = scan_req->ssids[0].ssid_len;
+ }
+
+ skb = dev_alloc_skb(frame_len + MAX_BGSCAN_PROBE_REQ_LEN);
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, frame_len + MAX_BGSCAN_PROBE_REQ_LEN);
+
+ bgscan = (struct rsi_bgscan_probe *)skb->data;
+ bgscan->desc_dword0.frame_type = BG_SCAN_PROBE_REQ;
+ bgscan->flags = cpu_to_le16(HOST_BG_SCAN_TRIG);
+ if (common->band == NL80211_BAND_5GHZ) {
+ bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_6);
+ bgscan->def_chan = cpu_to_le16(40);
+ } else {
+ bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_1);
+ bgscan->def_chan = cpu_to_le16(11);
+ }
+ bgscan->channel_scan_time = cpu_to_le16(RSI_CHANNEL_SCAN_TIME);
+
+ probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr, ssid,
+ ssid_len, scan_req->ie_len);
+
+ memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len);
+
+ bgscan->probe_req_length = cpu_to_le16(probereq_skb->len);
+
+ rsi_set_len_qno(&bgscan->desc_dword0.len_qno,
+ (frame_len - FRAME_DESC_SZ + probereq_skb->len),
+ RSI_WIFI_MGMT_Q);
+
+ skb_put(skb, frame_len + probereq_skb->len);
+
+ dev_kfree_skb(probereq_skb);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
/**
* rsi_handle_ta_confirm_type() - This function handles the confirm frames.
* @common: Pointer to the driver private structure.
@@ -1771,9 +1885,28 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
return 0;
}
break;
+
+ case SCAN_REQUEST:
+ rsi_dbg(INFO_ZONE, "Set channel confirm\n");
+ break;
+
case WAKEUP_SLEEP_REQUEST:
rsi_dbg(INFO_ZONE, "Wakeup/Sleep confirmation.\n");
return rsi_handle_ps_confirm(adapter, msg);
+
+ case BG_SCAN_PROBE_REQ:
+ rsi_dbg(INFO_ZONE, "BG scan complete event\n");
+ if (common->bgscan_en) {
+ struct cfg80211_scan_info info;
+
+ if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN))
+ common->bgscan_en = 0;
+ info.aborted = false;
+ ieee80211_scan_completed(adapter->hw, &info);
+ }
+ rsi_dbg(INFO_ZONE, "Background scan completed\n");
+ break;
+
default:
rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n",
__func__);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 5733e440ecaf..b412b65eb1f4 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -230,16 +230,19 @@ static void rsi_reset_card(struct sdio_func *pfunction)
rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
/* Issue CMD5, arg = 0 */
- err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
- (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
- if (err)
- rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err);
- card->ocr = resp;
+ if (!host->ocr_avail) {
+ err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
+ (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
+ if (err)
+ rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+ __func__, err);
+ host->ocr_avail = resp;
+ }
/* Issue CMD5, arg = ocr. Wait till card is ready */
for (i = 0; i < 100; i++) {
err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND,
- card->ocr,
+ host->ocr_avail,
(MMC_RSP_R4 | MMC_CMD_BCR), &resp);
if (err) {
rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index a084f224bb03..4dc0c0123469 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -164,6 +164,24 @@ struct transmit_q_stats {
u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 2];
};
+#define MAX_BGSCAN_CHANNELS_DUAL_BAND 38
+#define MAX_BGSCAN_PROBE_REQ_LEN 0x64
+#define RSI_DEF_BGSCAN_THRLD 0x0
+#define RSI_DEF_ROAM_THRLD 0xa
+#define RSI_BGSCAN_PERIODICITY 0x1e
+#define RSI_ACTIVE_SCAN_TIME 0x14
+#define RSI_PASSIVE_SCAN_TIME 0x46
+#define RSI_CHANNEL_SCAN_TIME 20
+struct rsi_bgscan_params {
+ u16 bgscan_threshold;
+ u16 roam_threshold;
+ u16 bgscan_periodicity;
+ u8 num_bgscan_channels;
+ u8 two_probe;
+ u16 active_scan_duration;
+ u16 passive_scan_duration;
+};
+
struct vif_priv {
bool is_ht;
bool sgi;
@@ -289,6 +307,10 @@ struct rsi_common {
bool eapol4_confirm;
void *bt_adapter;
+
+ struct cfg80211_scan_request *hwscan;
+ struct rsi_bgscan_params bgscan;
+ u8 bgscan_en;
};
struct eepromrw_info {
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 359fbdf85739..ea83faa15c7e 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -228,6 +228,9 @@
#define RSI_MAX_TX_AGGR_FRMS 8
#define RSI_MAX_RX_AGGR_FRMS 8
+#define RSI_MAX_SCAN_SSIDS 16
+#define RSI_MAX_SCAN_IE_LEN 256
+
enum opmode {
RSI_OPMODE_UNSUPPORTED = -1,
RSI_OPMODE_AP = 0,
@@ -623,6 +626,34 @@ struct rsi_wowlan_req {
u16 host_sleep_status;
} __packed;
+#define RSI_START_BGSCAN 1
+#define RSI_STOP_BGSCAN 0
+#define HOST_BG_SCAN_TRIG BIT(4)
+struct rsi_bgscan_config {
+ struct rsi_cmd_desc_dword0 desc_dword0;
+ __le64 reserved;
+ __le32 reserved1;
+ __le16 bgscan_threshold;
+ __le16 roam_threshold;
+ __le16 bgscan_periodicity;
+ u8 num_bgscan_channels;
+ u8 two_probe;
+ __le16 active_scan_duration;
+ __le16 passive_scan_duration;
+ __le16 channels2scan[MAX_BGSCAN_CHANNELS_DUAL_BAND];
+} __packed;
+
+struct rsi_bgscan_probe {
+ struct rsi_cmd_desc_dword0 desc_dword0;
+ __le64 reserved;
+ __le32 reserved1;
+ __le16 mgmt_rate;
+ __le16 flags;
+ __le16 def_chan;
+ __le16 channel_scan_time;
+ __le16 probe_req_length;
+} __packed;
+
static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
{
return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
@@ -694,4 +725,8 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
#endif
int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
struct ieee80211_vif *vif);
+void init_bgscan_params(struct rsi_common *common);
+int rsi_send_bgscan_params(struct rsi_common *common, int enable);
+int rsi_send_bgscan_probe_req(struct rsi_common *common,
+ struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/st/cw1200/debug.c b/drivers/net/wireless/st/cw1200/debug.c
index 2231ba08bc1f..d94266d9d0b8 100644
--- a/drivers/net/wireless/st/cw1200/debug.c
+++ b/drivers/net/wireless/st/cw1200/debug.c
@@ -371,28 +371,14 @@ int cw1200_debug_init(struct cw1200_common *priv)
d->debugfs_phy = debugfs_create_dir("cw1200",
priv->hw->wiphy->debugfsdir);
- if (!d->debugfs_phy)
- goto err;
-
- if (!debugfs_create_file("status", 0400, d->debugfs_phy,
- priv, &cw1200_status_fops))
- goto err;
-
- if (!debugfs_create_file("counters", 0400, d->debugfs_phy,
- priv, &cw1200_counters_fops))
- goto err;
-
- if (!debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy,
- priv, &fops_wsm_dumps))
- goto err;
+ debugfs_create_file("status", 0400, d->debugfs_phy, priv,
+ &cw1200_status_fops);
+ debugfs_create_file("counters", 0400, d->debugfs_phy, priv,
+ &cw1200_counters_fops);
+ debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy, priv,
+ &fops_wsm_dumps);
return 0;
-
-err:
- priv->debug = NULL;
- debugfs_remove_recursive(d->debugfs_phy);
- kfree(d);
- return ret;
}
void cw1200_debug_release(struct cw1200_common *priv)
diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c
index 30e7646d04af..b7881232499c 100644
--- a/drivers/net/wireless/st/cw1200/fwio.c
+++ b/drivers/net/wireless/st/cw1200/fwio.c
@@ -465,8 +465,8 @@ int cw1200_load_firmware(struct cw1200_common *priv)
if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) {
pr_err("Device is already in QUEUE mode!\n");
- ret = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto out;
}
switch (priv->hw_type) {
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 7c31b63b8258..7895efefa95d 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -283,7 +283,6 @@ int cw1200_queue_put(struct cw1200_queue *queue,
struct cw1200_txpriv *txpriv)
{
int ret = 0;
- LIST_HEAD(gc_list);
struct cw1200_queue_stats *stats = queue->stats;
if (txpriv->link_id >= queue->stats->map_capacity)
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 0a9eac93dd01..71e9b91cf15b 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -84,8 +84,11 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
- if (!frame.skb)
+ if (!frame.skb) {
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
return -ENOMEM;
+ }
if (req->ie_len)
skb_put_data(frame.skb, req->ie, req->ie_len);
diff --git a/drivers/net/wireless/ti/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c
index 448da1f8c22f..c99b23aaa70e 100644
--- a/drivers/net/wireless/ti/wl1251/debugfs.c
+++ b/drivers/net/wireless/ti/wl1251/debugfs.c
@@ -54,11 +54,6 @@ static const struct file_operations name## _ops = { \
#define DEBUGFS_ADD(name, parent) \
wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \
wl, &name## _ops); \
- if (IS_ERR(wl->debugfs.name)) { \
- ret = PTR_ERR(wl->debugfs.name); \
- wl->debugfs.name = NULL; \
- goto out; \
- }
#define DEBUGFS_DEL(name) \
do { \
@@ -354,10 +349,8 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
DEBUGFS_DEL(excessive_retries);
}
-static int wl1251_debugfs_add_files(struct wl1251 *wl)
+static void wl1251_debugfs_add_files(struct wl1251 *wl)
{
- int ret = 0;
-
DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
@@ -453,12 +446,6 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
-
-out:
- if (ret < 0)
- wl1251_debugfs_delete_files(wl);
-
- return ret;
}
void wl1251_debugfs_reset(struct wl1251 *wl)
@@ -471,56 +458,20 @@ void wl1251_debugfs_reset(struct wl1251 *wl)
int wl1251_debugfs_init(struct wl1251 *wl)
{
- int ret;
+ wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), GFP_KERNEL);
+ if (!wl->stats.fw_stats)
+ return -ENOMEM;
wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR(wl->debugfs.rootdir)) {
- ret = PTR_ERR(wl->debugfs.rootdir);
- wl->debugfs.rootdir = NULL;
- goto err;
- }
-
wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics",
wl->debugfs.rootdir);
- if (IS_ERR(wl->debugfs.fw_statistics)) {
- ret = PTR_ERR(wl->debugfs.fw_statistics);
- wl->debugfs.fw_statistics = NULL;
- goto err_root;
- }
-
- wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
- GFP_KERNEL);
-
- if (!wl->stats.fw_stats) {
- ret = -ENOMEM;
- goto err_fw;
- }
-
wl->stats.fw_stats_update = jiffies;
- ret = wl1251_debugfs_add_files(wl);
-
- if (ret < 0)
- goto err_file;
+ wl1251_debugfs_add_files(wl);
return 0;
-
-err_file:
- kfree(wl->stats.fw_stats);
- wl->stats.fw_stats = NULL;
-
-err_fw:
- debugfs_remove(wl->debugfs.fw_statistics);
- wl->debugfs.fw_statistics = NULL;
-
-err_root:
- debugfs_remove(wl->debugfs.rootdir);
- wl->debugfs.rootdir = NULL;
-
-err:
- return ret;
}
void wl1251_debugfs_exit(struct wl1251 *wl)
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.c b/drivers/net/wireless/ti/wl12xx/debugfs.c
index 0521cbf858cf..6c3c04eca8fd 100644
--- a/drivers/net/wireless/ti/wl12xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.c
@@ -125,20 +125,10 @@ WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
int wl12xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
- int ret = 0;
- struct dentry *entry, *stats, *moddir;
+ struct dentry *stats, *moddir;
moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
- if (!moddir || IS_ERR(moddir)) {
- entry = moddir;
- goto err;
- }
-
stats = debugfs_create_dir("fw_stats", moddir);
- if (!stats || IS_ERR(stats)) {
- entry = stats;
- goto err;
- }
DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
@@ -232,12 +222,4 @@ int wl12xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
return 0;
-
-err:
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- else
- ret = -ENOMEM;
-
- return ret;
}
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 597e934c4630..5f4ec997ca59 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -422,20 +422,10 @@ static const struct file_operations radar_debug_mode_ops = {
int wl18xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
- int ret = 0;
- struct dentry *entry, *stats, *moddir;
+ struct dentry *stats, *moddir;
moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
- if (!moddir || IS_ERR(moddir)) {
- entry = moddir;
- goto err;
- }
-
stats = debugfs_create_dir("fw_stats", moddir);
- if (!stats || IS_ERR(stats)) {
- entry = stats;
- goto err;
- }
DEBUGFS_ADD(clear_fw_stats, stats);
@@ -590,12 +580,4 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(dynamic_fw_traces, moddir);
return 0;
-
-err:
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- else
- ret = -ENOMEM;
-
- return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 903968735a74..348be0aed97e 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1427,7 +1427,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_warning("could not set keys");
- goto out;
+ goto out;
}
out:
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index aeb74e74698e..68acd901d384 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -1301,11 +1301,10 @@ static const struct file_operations fw_logger_ops = {
.llseek = default_llseek,
};
-static int wl1271_debugfs_add_files(struct wl1271 *wl,
- struct dentry *rootdir)
+static void wl1271_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
{
- int ret = 0;
- struct dentry *entry, *streaming;
+ struct dentry *streaming;
DEBUGFS_ADD(tx_queue_len, rootdir);
DEBUGFS_ADD(retry_count, rootdir);
@@ -1330,23 +1329,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(fw_logger, rootdir);
streaming = debugfs_create_dir("rx_streaming", rootdir);
- if (!streaming || IS_ERR(streaming))
- goto err;
DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming);
DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming);
DEBUGFS_ADD_PREFIX(dev, mem, rootdir);
-
- return 0;
-
-err:
- if (IS_ERR(entry))
- ret = PTR_ERR(entry);
- else
- ret = -ENOMEM;
-
- return ret;
}
void wl1271_debugfs_reset(struct wl1271 *wl)
@@ -1367,11 +1354,6 @@ int wl1271_debugfs_init(struct wl1271 *wl)
rootdir = debugfs_create_dir(KBUILD_MODNAME,
wl->hw->wiphy->debugfsdir);
- if (IS_ERR(rootdir)) {
- ret = PTR_ERR(rootdir);
- goto out;
- }
-
wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL);
if (!wl->stats.fw_stats) {
ret = -ENOMEM;
@@ -1380,9 +1362,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
wl->stats.fw_stats_update = jiffies;
- ret = wl1271_debugfs_add_files(wl, rootdir);
- if (ret < 0)
- goto out_exit;
+ wl1271_debugfs_add_files(wl, rootdir);
ret = wlcore_debugfs_init(wl, rootdir);
if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index bf14676e6515..a4952c4f587e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -53,19 +53,15 @@ static const struct file_operations name## _ops = { \
#define DEBUGFS_ADD(name, parent) \
do { \
- entry = debugfs_create_file(#name, 0400, parent, \
- wl, &name## _ops); \
- if (!entry || IS_ERR(entry)) \
- goto err; \
+ debugfs_create_file(#name, 0400, parent, \
+ wl, &name## _ops); \
} while (0)
#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
do { \
- entry = debugfs_create_file(#name, 0400, parent, \
+ debugfs_create_file(#name, 0400, parent, \
wl, &prefix## _## name## _ops); \
- if (!entry || IS_ERR(entry)) \
- goto err; \
} while (0)
#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 26b187336875..2e12de813a5b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1085,8 +1085,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
goto out;
ret = wl12xx_fetch_firmware(wl, plt);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ kfree(wl->fw_status);
+ kfree(wl->raw_fw_status);
+ kfree(wl->tx_res_if);
+ }
out:
return ret;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5ad1342f5682..de6157357e26 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -16,7 +16,6 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
@@ -463,7 +462,6 @@ int of_phy_register_fixed_link(struct device_node *np)
struct device_node *fixed_link_node;
u32 fixed_link_prop[5];
const char *managed;
- int link_gpio = -1;
if (of_property_read_string(np, "managed", &managed) == 0 &&
strcmp(managed, "in-band-status") == 0) {
@@ -485,11 +483,7 @@ int of_phy_register_fixed_link(struct device_node *np)
status.pause = of_property_read_bool(fixed_link_node, "pause");
status.asym_pause = of_property_read_bool(fixed_link_node,
"asym-pause");
- link_gpio = of_get_named_gpio_flags(fixed_link_node,
- "link-gpios", 0, NULL);
of_node_put(fixed_link_node);
- if (link_gpio == -EPROBE_DEFER)
- return -EPROBE_DEFER;
goto register_phy;
}
@@ -508,8 +502,7 @@ int of_phy_register_fixed_link(struct device_node *np)
return -ENODEV;
register_phy:
- return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, link_gpio,
- np));
+ return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np));
}
EXPORT_SYMBOL(of_phy_register_fixed_link);
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index daabaceeea52..e559f4c25cf7 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -505,6 +505,17 @@ static netdev_tx_t port_dropframe(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int swdev_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+
+ ppid->id_len = 1;
+ ppid->id[0] = port_priv->ethsw_data->dev_id;
+
+ return 0;
+}
+
static const struct net_device_ops ethsw_port_ops = {
.ndo_open = port_open,
.ndo_stop = port_stop,
@@ -515,6 +526,7 @@ static const struct net_device_ops ethsw_port_ops = {
.ndo_get_offload_stats = port_get_offload_stats,
.ndo_start_xmit = port_dropframe,
+ .ndo_get_port_parent_id = swdev_get_port_parent_id,
};
static void ethsw_links_state_update(struct ethsw_core *ethsw)
@@ -634,10 +646,6 @@ static int swdev_port_attr_get(struct net_device *netdev,
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = 1;
- attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
- break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
attr->u.brport_flags =
(port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index ef61f3607e99..60b94b944e9f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -332,6 +332,8 @@ extern int bcma_arch_register_fallback_sprom(
struct ssb_sprom *out));
struct bcma_bus {
+ struct device *dev;
+
/* The MMIO area. */
void __iomem *mmio;
@@ -339,14 +341,7 @@ struct bcma_bus {
enum bcma_hosttype hosttype;
bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
- union {
- /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
- struct pci_dev *host_pci;
- /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
- struct sdio_func *host_sdio;
- /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
- struct platform_device *host_pdev;
- };
+ struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */
struct bcma_chipinfo chipinfo;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index afd9596ce636..19a8de5326fb 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -400,4 +400,19 @@ struct ethtool_ops {
void (*get_ethtool_phy_stats)(struct net_device *,
struct ethtool_stats *, u64 *);
};
+
+struct ethtool_rx_flow_rule {
+ struct flow_rule *rule;
+ unsigned long priv[0];
+};
+
+struct ethtool_rx_flow_spec_input {
+ const struct ethtool_rx_flow_spec *fs;
+ u32 rss_ctx;
+};
+
+struct ethtool_rx_flow_rule *
+ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input);
+void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule);
+
#endif /* _LINUX_ETHTOOL_H */
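
The new ethtool_rx_flow_rule_create() helper translates an ethtool_rx_flow_spec into the flow_rule representation introduced elsewhere in this series (include/net/flow_offload.h), so a driver can share one parser between ethtool RX NFC and cls_flower offloads. A minimal sketch of the intended usage; my_hw_install_ipv4_rule() is a hypothetical placeholder, not part of this series:

static int my_install_rx_flow(struct net_device *dev,
			      const struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = { .fs = fs };
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	int err = -EOPNOTSUPP;

	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	/* The generated rule is parsed with the same flow_rule_match_*()
	 * helpers used for cls_flower offload requests.
	 */
	if (flow_rule_match_key(flow->rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
		err = my_hw_install_ipv4_rule(dev, ipv4.key, ipv4.mask); /* hypothetical */
	}

	ethtool_rx_flow_rule_destroy(flow);
	return err;
}
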
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ba57d0ba425e..1d95e634f3fe 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1188,6 +1188,10 @@ struct dev_ifalias {
* not implement this, it is assumed that the hw is not able to have
* multiple net devices on single physical port.
*
+ * int (*ndo_get_port_parent_id)(struct net_device *dev,
+ * struct netdev_phys_item_id *ppid)
+ * Called to get the parent ID of the physical port of this device.
+ *
* void (*ndo_udp_tunnel_add)(struct net_device *dev,
* struct udp_tunnel_info *ti);
* Called by UDP tunnel to notify a driver about the UDP port and socket
@@ -1412,6 +1416,8 @@ struct net_device_ops {
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+ int (*ndo_get_port_parent_id)(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
void (*ndo_udp_tunnel_add)(struct net_device *dev,
@@ -3651,6 +3657,9 @@ int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
+int dev_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid, bool recurse);
+bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 70f83d0d7469..237dd035858a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -693,17 +693,6 @@ static inline bool phy_is_started(struct phy_device *phydev)
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
/**
- * phy_read_mmd - Convenience function for reading a register
- * from an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for phy_read();
- */
-int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
-
-/**
* phy_read - Convenience function for reading a given PHY register
* @phydev: the phy_device struct
* @regnum: register number to read
@@ -758,9 +747,60 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
val);
}
+/**
+ * phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Same rules as for phy_read();
+ */
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+
+/**
+ * __phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Same rules as for __phy_read();
+ */
+int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+
+/**
+ * phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to read
+ * @val: value to write to @regnum
+ *
+ * Same rules as for phy_write();
+ */
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
+
+/**
+ * __phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to read
+ * @val: value to write to @regnum
+ *
+ * Same rules as for __phy_write();
+ */
+int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
+
int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+
/**
* __phy_set_bits - Convenience function for setting bits in a PHY register
* @phydev: the phy_device struct
@@ -811,6 +851,66 @@ static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
}
/**
+ * __phy_set_bits_mmd - Convenience function for setting bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to set
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_set_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return __phy_modify_mmd(phydev, devad, regnum, 0, val);
+}
+
+/**
+ * __phy_clear_bits_mmd - Convenience function for clearing bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to clear
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_clear_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return __phy_modify_mmd(phydev, devad, regnum, val, 0);
+}
+
+/**
+ * phy_set_bits_mmd - Convenience function for setting bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to set
+ */
+static inline int phy_set_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return phy_modify_mmd(phydev, devad, regnum, 0, val);
+}
+
+/**
+ * phy_clear_bits_mmd - Convenience function for clearing bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to clear
+ */
+static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return phy_modify_mmd(phydev, devad, regnum, val, 0);
+}
+
+/**
* phy_interrupt_is_valid - Convenience function for testing a given PHY irq
* @phydev: the phy_device struct
*
@@ -886,18 +986,6 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
return phydev->is_pseudo_fixed_link;
}
-/**
- * phy_write_mmd - Convenience function for writing a register
- * on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for phy_write();
- */
-int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
-
int phy_save_page(struct phy_device *phydev);
int phy_select_page(struct phy_device *phydev, int page);
int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
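
The new MMD accessors mirror the existing Clause 22 helpers: phy_set_bits_mmd()/phy_clear_bits_mmd() take the MDIO bus lock themselves, while the __-prefixed variants expect the caller to already hold it. A short, hypothetical example; the vendor register and bit names below are assumptions, not defined by this patch:

#define MY_VEND_CTRL_REG	0x8000	/* hypothetical vendor register */
#define MY_VEND_FEATURE_EN	BIT(0)	/* hypothetical enable bit */

static int my_phy_config_init(struct phy_device *phydev)
{
	/* Sets one bit in MMD 30 (vendor 1); the helper handles the
	 * MDIO bus locking internally.
	 */
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				MY_VEND_CTRL_REG, MY_VEND_FEATURE_EN);
}
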
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 9525567b1951..c78fc203db43 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -15,11 +15,9 @@ struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status,
- int link_gpio);
+ struct fixed_phy_status *status);
extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
- int link_gpio,
struct device_node *np);
extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -27,14 +25,12 @@ extern int fixed_phy_set_link_update(struct phy_device *phydev,
struct fixed_phy_status *));
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status,
- int link_gpio)
+ struct fixed_phy_status *status)
{
return -ENODEV;
}
static inline struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
- int gpio_link,
struct device_node *np)
{
return ERR_PTR(-ENODEV);
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
new file mode 100644
index 000000000000..23166caa0da5
--- /dev/null
+++ b/include/net/flow_offload.h
@@ -0,0 +1,203 @@
+#ifndef _NET_FLOW_OFFLOAD_H
+#define _NET_FLOW_OFFLOAD_H
+
+#include <net/flow_dissector.h>
+
+struct flow_match {
+ struct flow_dissector *dissector;
+ void *mask;
+ void *key;
+};
+
+struct flow_match_basic {
+ struct flow_dissector_key_basic *key, *mask;
+};
+
+struct flow_match_control {
+ struct flow_dissector_key_control *key, *mask;
+};
+
+struct flow_match_eth_addrs {
+ struct flow_dissector_key_eth_addrs *key, *mask;
+};
+
+struct flow_match_vlan {
+ struct flow_dissector_key_vlan *key, *mask;
+};
+
+struct flow_match_ipv4_addrs {
+ struct flow_dissector_key_ipv4_addrs *key, *mask;
+};
+
+struct flow_match_ipv6_addrs {
+ struct flow_dissector_key_ipv6_addrs *key, *mask;
+};
+
+struct flow_match_ip {
+ struct flow_dissector_key_ip *key, *mask;
+};
+
+struct flow_match_ports {
+ struct flow_dissector_key_ports *key, *mask;
+};
+
+struct flow_match_icmp {
+ struct flow_dissector_key_icmp *key, *mask;
+};
+
+struct flow_match_tcp {
+ struct flow_dissector_key_tcp *key, *mask;
+};
+
+struct flow_match_mpls {
+ struct flow_dissector_key_mpls *key, *mask;
+};
+
+struct flow_match_enc_keyid {
+ struct flow_dissector_key_keyid *key, *mask;
+};
+
+struct flow_match_enc_opts {
+ struct flow_dissector_key_enc_opts *key, *mask;
+};
+
+struct flow_rule;
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+ struct flow_match_basic *out);
+void flow_rule_match_control(const struct flow_rule *rule,
+ struct flow_match_control *out);
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+ struct flow_match_eth_addrs *out);
+void flow_rule_match_vlan(const struct flow_rule *rule,
+ struct flow_match_vlan *out);
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out);
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out);
+void flow_rule_match_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out);
+void flow_rule_match_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out);
+void flow_rule_match_tcp(const struct flow_rule *rule,
+ struct flow_match_tcp *out);
+void flow_rule_match_icmp(const struct flow_rule *rule,
+ struct flow_match_icmp *out);
+void flow_rule_match_mpls(const struct flow_rule *rule,
+ struct flow_match_mpls *out);
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+ struct flow_match_control *out);
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out);
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out);
+void flow_rule_match_enc_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out);
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out);
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+ struct flow_match_enc_keyid *out);
+void flow_rule_match_enc_opts(const struct flow_rule *rule,
+ struct flow_match_enc_opts *out);
+
+enum flow_action_id {
+ FLOW_ACTION_ACCEPT = 0,
+ FLOW_ACTION_DROP,
+ FLOW_ACTION_TRAP,
+ FLOW_ACTION_GOTO,
+ FLOW_ACTION_REDIRECT,
+ FLOW_ACTION_MIRRED,
+ FLOW_ACTION_VLAN_PUSH,
+ FLOW_ACTION_VLAN_POP,
+ FLOW_ACTION_VLAN_MANGLE,
+ FLOW_ACTION_TUNNEL_ENCAP,
+ FLOW_ACTION_TUNNEL_DECAP,
+ FLOW_ACTION_MANGLE,
+ FLOW_ACTION_ADD,
+ FLOW_ACTION_CSUM,
+ FLOW_ACTION_MARK,
+ FLOW_ACTION_WAKE,
+ FLOW_ACTION_QUEUE,
+};
+
+/* This is mirroring enum pedit_header_type definition for easy mapping between
+ * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
+ * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
+ */
+enum flow_action_mangle_base {
+ FLOW_ACT_MANGLE_UNSPEC = 0,
+ FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ FLOW_ACT_MANGLE_HDR_TYPE_IP4,
+ FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+ FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ FLOW_ACT_MANGLE_HDR_TYPE_UDP,
+};
+
+struct flow_action_entry {
+ enum flow_action_id id;
+ union {
+ u32 chain_index; /* FLOW_ACTION_GOTO */
+ struct net_device *dev; /* FLOW_ACTION_REDIRECT */
+ struct { /* FLOW_ACTION_VLAN */
+ u16 vid;
+ __be16 proto;
+ u8 prio;
+ } vlan;
+ struct { /* FLOW_ACTION_PACKET_EDIT */
+ enum flow_action_mangle_base htype;
+ u32 offset;
+ u32 mask;
+ u32 val;
+ } mangle;
+ const struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */
+ u32 csum_flags; /* FLOW_ACTION_CSUM */
+ u32 mark; /* FLOW_ACTION_MARK */
+ struct { /* FLOW_ACTION_QUEUE */
+ u32 ctx;
+ u32 index;
+ u8 vf;
+ } queue;
+ };
+};
+
+struct flow_action {
+ unsigned int num_entries;
+ struct flow_action_entry entries[0];
+};
+
+static inline bool flow_action_has_entries(const struct flow_action *action)
+{
+ return action->num_entries;
+}
+
+#define flow_action_for_each(__i, __act, __actions) \
+ for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[__i++])
+
+struct flow_rule {
+ struct flow_match match;
+ struct flow_action action;
+};
+
+struct flow_rule *flow_rule_alloc(unsigned int num_actions);
+
+static inline bool flow_rule_match_key(const struct flow_rule *rule,
+ enum flow_dissector_key_id key)
+{
+ return dissector_uses_key(rule->match.dissector, key);
+}
+
+struct flow_stats {
+ u64 pkts;
+ u64 bytes;
+ u64 lastused;
+};
+
+static inline void flow_stats_update(struct flow_stats *flow_stats,
+ u64 bytes, u64 pkts, u64 lastused)
+{
+ flow_stats->pkts = pkts;
+ flow_stats->bytes = bytes;
+ flow_stats->lastused = lastused;
+}
+
+#endif /* _NET_FLOW_OFFLOAD_H */
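
For drivers, the typical pattern is to pull out the keys of interest with the flow_rule_match_*() helpers and then walk the action list with flow_action_for_each(). A minimal sketch under the assumption of a hypothetical my_hw_add_drop_rule() back end:

static int my_parse_flow(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	struct flow_match_basic basic;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		flow_rule_match_basic(rule, &basic);
		if (basic.mask->n_proto &&
		    basic.key->n_proto != htons(ETH_P_IP))
			return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			return my_hw_add_drop_rule(dev);	/* hypothetical */
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}
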
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 40965fbbcd31..cb8be396a11f 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -6,6 +6,7 @@
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
+#include <net/flow_offload.h>
/* TC action not accessible from user space */
#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
@@ -619,8 +620,11 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */
+int tc_setup_flow_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
void *type_data, bool err_stop);
+unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
enum tc_block_command {
TC_BLOCK_BIND,
@@ -760,13 +764,17 @@ struct tc_cls_flower_offload {
struct tc_cls_common_offload common;
enum tc_fl_command command;
unsigned long cookie;
- struct flow_dissector *dissector;
- struct fl_flow_key *mask;
- struct fl_flow_key *key;
- struct tcf_exts *exts;
+ struct flow_rule *rule;
+ struct flow_stats stats;
u32 classid;
};
+static inline struct flow_rule *
+tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
+{
+ return tc_flow_cmd->rule;
+}
+
enum tc_matchall_command {
TC_CLSMATCHALL_REPLACE,
TC_CLSMATCHALL_DESTROY,
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 63843ae5dc81..5e87b54c5dc5 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -43,7 +43,6 @@ static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans)
enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_UNDEFINED,
- SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
SWITCHDEV_ATTR_ID_PORT_STP_STATE,
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT,
@@ -61,7 +60,6 @@ struct switchdev_attr {
void *complete_priv;
void (*complete)(struct net_device *dev, int err, void *priv);
union {
- struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
unsigned long brport_flags_support; /* PORT_BRIDGE_FLAGS_SUPPORT */
@@ -208,9 +206,6 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
-bool switchdev_port_same_parent_id(struct net_device *a,
- struct net_device *b);
-
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
@@ -295,12 +290,6 @@ call_switchdev_blocking_notifiers(unsigned long val,
return NOTIFY_DONE;
}
-static inline bool switchdev_port_same_parent_id(struct net_device *a,
- struct net_device *b)
-{
- return false;
-}
-
static inline int
switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 44acfbca1266..40705364a50f 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -46,6 +46,35 @@ TRACE_EVENT(devlink_hwmsg,
(int) __entry->len, __get_dynamic_array(buf), __entry->len)
);
+/*
+ * Tracepoint for devlink hardware error:
+ */
+TRACE_EVENT(devlink_hwerr,
+ TP_PROTO(const struct devlink *devlink, int err, const char *msg),
+
+ TP_ARGS(devlink, err, msg),
+
+ TP_STRUCT__entry(
+ __string(bus_name, devlink->dev->bus->name)
+ __string(dev_name, dev_name(devlink->dev))
+ __string(driver_name, devlink->dev->driver->name)
+ __field(int, err)
+ __string(msg, msg)
+ ),
+
+ TP_fast_assign(
+ __assign_str(bus_name, devlink->dev->bus->name);
+ __assign_str(dev_name, dev_name(devlink->dev));
+ __assign_str(driver_name, devlink->dev->driver->name);
+ __entry->err = err;
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("bus_name=%s dev_name=%s driver_name=%s err=%d %s",
+ __get_str(bus_name), __get_str(dev_name),
+ __get_str(driver_name), __entry->err, __get_str(msg))
+);
+
#endif /* _TRACE_DEVLINK_H */
/* This part must be outside protection */
@@ -64,6 +93,10 @@ static inline void trace_devlink_hwmsg(const struct devlink *devlink,
{
}
+static inline void trace_devlink_hwerr(const struct devlink *devlink,
+ int err, const char *msg)
+{
+}
#endif /* _TRACE_DEVLINK_H */
#endif
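
Drivers report a hardware or firmware error through the new tracepoint in the same way they already use devlink_hwmsg; a minimal, hypothetical call site (my_report_fw_error() is an assumed wrapper, not part of this series):

static void my_report_fw_error(struct devlink *devlink, int err)
{
	/* The message is free-form; err typically comes from a failed
	 * firmware command.
	 */
	if (err)
		trace_devlink_hwerr(devlink, err, "firmware command failed");
}
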
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 4d2b9eb7604a..db9e8ab96d48 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -14,7 +14,7 @@ static int br_switchdev_mark_get(struct net_bridge *br, struct net_device *dev)
/* dev is yet to be added to the port list. */
list_for_each_entry(p, &br->port_list, list) {
- if (switchdev_port_same_parent_id(dev, p->dev))
+ if (netdev_port_same_parent_id(dev, p->dev))
return p->offload_fwd_mark;
}
@@ -23,15 +23,12 @@ static int br_switchdev_mark_get(struct net_bridge *br, struct net_device *dev)
int nbp_switchdev_mark_set(struct net_bridge_port *p)
{
- struct switchdev_attr attr = {
- .orig_dev = p->dev,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
+ struct netdev_phys_item_id ppid = { };
int err;
ASSERT_RTNL();
- err = switchdev_port_attr_get(p->dev, &attr);
+ err = dev_get_port_parent_id(p->dev, &ppid, true);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
diff --git a/net/core/Makefile b/net/core/Makefile
index fccd31e0e7f7..f97d6254e564 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
- fib_notifier.o xdp.o
+ fib_notifier.o xdp.o flow_offload.o
obj-y += net-sysfs.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 78c3b48392e1..ecbe419e05ab 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7878,6 +7878,63 @@ int dev_get_phys_port_name(struct net_device *dev,
EXPORT_SYMBOL(dev_get_phys_port_name);
/**
+ * dev_get_port_parent_id - Get the device's port parent identifier
+ * @dev: network device
+ * @ppid: pointer to a storage for the port's parent identifier
+ * @recurse: allow/disallow recursion to lower devices
+ *
+ * Get the device's port parent identifier
+ */
+int dev_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid,
+ bool recurse)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ struct netdev_phys_item_id first = { };
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ if (ops->ndo_get_port_parent_id)
+ return ops->ndo_get_port_parent_id(dev, ppid);
+
+ if (!recurse)
+ return err;
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = dev_get_port_parent_id(lower_dev, ppid, recurse);
+ if (err)
+ break;
+ if (!first.id_len)
+ first = *ppid;
+ else if (memcmp(&first, ppid, sizeof(*ppid)))
+ return -ENODATA;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(dev_get_port_parent_id);
+
+/**
+ * netdev_port_same_parent_id - Indicate if two network devices have
+ * the same port parent identifier
+ * @a: first network device
+ * @b: second network device
+ */
+bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
+{
+ struct netdev_phys_item_id a_id = { };
+ struct netdev_phys_item_id b_id = { };
+
+ if (dev_get_port_parent_id(a, &a_id, true) ||
+ dev_get_port_parent_id(b, &b_id, true))
+ return false;
+
+ return netdev_phys_item_id_same(&a_id, &b_id);
+}
+EXPORT_SYMBOL(netdev_port_same_parent_id);
+
+/**
* dev_change_proto_down - update protocol port state information
* @dev: device
* @proto_down: new value
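
For illustration only, a hedged sketch of how a driver might implement the new ndo_get_port_parent_id callback that dev_get_port_parent_id() consults, and how callers use the two helpers added above; the example_port structure and its switch_id field are assumptions, while struct netdev_phys_item_id, dev_get_port_parent_id() and netdev_port_same_parent_id() come from this patch.

#include <linux/netdevice.h>

struct example_port {
	u32 switch_id;			/* assumed per-ASIC identifier */
};

/* Hypothetical driver callback: report which ASIC this port belongs to. */
static int example_get_port_parent_id(struct net_device *dev,
				      struct netdev_phys_item_id *ppid)
{
	struct example_port *port = netdev_priv(dev);

	ppid->id_len = sizeof(port->switch_id);
	memcpy(ppid->id, &port->switch_id, ppid->id_len);
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_port_parent_id	= example_get_port_parent_id,
};

/* Callers then go through the core, with or without recursing into
 * lower (e.g. bond/team) devices:
 *
 *	struct netdev_phys_item_id ppid = { };
 *	int err = dev_get_port_parent_id(dev, &ppid, true);
 *	bool same = netdev_port_same_parent_id(dev_a, dev_b);
 */
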
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 52bf27491fb8..cd0d393bc62d 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -81,6 +81,7 @@ struct devlink_dpipe_header devlink_dpipe_header_ipv6 = {
EXPORT_SYMBOL(devlink_dpipe_header_ipv6);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
static LIST_HEAD(devlink_list);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 45c0a6e3d6ad..0fbf39239b29 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -29,6 +29,7 @@
#include <linux/net.h>
#include <net/devlink.h>
#include <net/xdp_sock.h>
+#include <net/flow_offload.h>
/*
* Some useful ethtool_ops methods that're device independent.
@@ -2820,3 +2821,243 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
return rc;
}
+
+struct ethtool_rx_flow_key {
+ struct flow_dissector_key_basic basic;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_ports tp;
+ struct flow_dissector_key_ip ip;
+ struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_eth_addrs eth_addrs;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct ethtool_rx_flow_match {
+ struct flow_dissector dissector;
+ struct ethtool_rx_flow_key key;
+ struct ethtool_rx_flow_key mask;
+};
+
+struct ethtool_rx_flow_rule *
+ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
+{
+ const struct ethtool_rx_flow_spec *fs = input->fs;
+ static struct in6_addr zero_addr = {};
+ struct ethtool_rx_flow_match *match;
+ struct ethtool_rx_flow_rule *flow;
+ struct flow_action_entry *act;
+
+ flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) +
+ sizeof(struct ethtool_rx_flow_match), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ /* ethtool_rx supports a single action per rule. */
+ flow->rule = flow_rule_alloc(1);
+ if (!flow->rule) {
+ kfree(flow);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ match = (struct ethtool_rx_flow_match *)flow->priv;
+ flow->rule->match.dissector = &match->dissector;
+ flow->rule->match.mask = &match->mask;
+ flow->rule->match.key = &match->key;
+
+ match->mask.basic.n_proto = htons(0xffff);
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+
+ match->key.basic.n_proto = htons(ETH_P_IP);
+
+ v4_spec = &fs->h_u.tcp_ip4_spec;
+ v4_m_spec = &fs->m_u.tcp_ip4_spec;
+
+ if (v4_m_spec->ip4src) {
+ match->key.ipv4.src = v4_spec->ip4src;
+ match->mask.ipv4.src = v4_m_spec->ip4src;
+ }
+ if (v4_m_spec->ip4dst) {
+ match->key.ipv4.dst = v4_spec->ip4dst;
+ match->mask.ipv4.dst = v4_m_spec->ip4dst;
+ }
+ if (v4_m_spec->ip4src ||
+ v4_m_spec->ip4dst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, ipv4);
+ }
+ if (v4_m_spec->psrc) {
+ match->key.tp.src = v4_spec->psrc;
+ match->mask.tp.src = v4_m_spec->psrc;
+ }
+ if (v4_m_spec->pdst) {
+ match->key.tp.dst = v4_spec->pdst;
+ match->mask.tp.dst = v4_m_spec->pdst;
+ }
+ if (v4_m_spec->psrc ||
+ v4_m_spec->pdst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
+ offsetof(struct ethtool_rx_flow_key, tp);
+ }
+ if (v4_m_spec->tos) {
+ match->key.ip.tos = v4_spec->tos;
+ match->mask.ip.tos = v4_m_spec->tos;
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IP);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
+ offsetof(struct ethtool_rx_flow_key, ip);
+ }
+ }
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW: {
+ const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+
+ match->key.basic.n_proto = htons(ETH_P_IPV6);
+
+ v6_spec = &fs->h_u.tcp_ip6_spec;
+ v6_m_spec = &fs->m_u.tcp_ip6_spec;
+ if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+ memcpy(&match->key.ipv6.src, v6_spec->ip6src,
+ sizeof(match->key.ipv6.src));
+ memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src,
+ sizeof(match->mask.ipv6.src));
+ }
+ if (memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
+ memcpy(&match->key.ipv6.dst, v6_spec->ip6dst,
+ sizeof(match->key.ipv6.dst));
+ memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst,
+ sizeof(match->mask.ipv6.dst));
+ }
+ if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
+ memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, ipv6);
+ }
+ if (v6_m_spec->psrc) {
+ match->key.tp.src = v6_spec->psrc;
+ match->mask.tp.src = v6_m_spec->psrc;
+ }
+ if (v6_m_spec->pdst) {
+ match->key.tp.dst = v6_spec->pdst;
+ match->mask.tp.dst = v6_m_spec->pdst;
+ }
+ if (v6_m_spec->psrc ||
+ v6_m_spec->pdst) {
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_PORTS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
+ offsetof(struct ethtool_rx_flow_key, tp);
+ }
+ if (v6_m_spec->tclass) {
+ match->key.ip.tos = v6_spec->tclass;
+ match->mask.ip.tos = v6_m_spec->tclass;
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_IP);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
+ offsetof(struct ethtool_rx_flow_key, ip);
+ }
+ }
+ break;
+ default:
+ ethtool_rx_flow_rule_destroy(flow);
+ return ERR_PTR(-EINVAL);
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ match->key.basic.ip_proto = IPPROTO_TCP;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ match->key.basic.ip_proto = IPPROTO_UDP;
+ break;
+ }
+ match->mask.basic.ip_proto = 0xff;
+
+ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] =
+ offsetof(struct ethtool_rx_flow_key, basic);
+
+ if (fs->flow_type & FLOW_EXT) {
+ const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
+ const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
+
+ if (ext_m_spec->vlan_etype &&
+ ext_m_spec->vlan_tci) {
+ match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype;
+ match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype;
+
+ match->key.vlan.vlan_id =
+ ntohs(ext_h_spec->vlan_tci) & 0x0fff;
+ match->mask.vlan.vlan_id =
+ ntohs(ext_m_spec->vlan_tci) & 0x0fff;
+
+ match->key.vlan.vlan_priority =
+ (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13;
+ match->mask.vlan.vlan_priority =
+ (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13;
+
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_VLAN);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
+ offsetof(struct ethtool_rx_flow_key, vlan);
+ }
+ }
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
+ const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
+
+ if (ext_m_spec->h_dest) {
+ memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest,
+ ETH_ALEN);
+ memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest,
+ ETH_ALEN);
+
+ match->dissector.used_keys |=
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] =
+ offsetof(struct ethtool_rx_flow_key, eth_addrs);
+ }
+ }
+
+ act = &flow->rule->action.entries[0];
+ switch (fs->ring_cookie) {
+ case RX_CLS_FLOW_DISC:
+ act->id = FLOW_ACTION_DROP;
+ break;
+ case RX_CLS_FLOW_WAKE:
+ act->id = FLOW_ACTION_WAKE;
+ break;
+ default:
+ act->id = FLOW_ACTION_QUEUE;
+ if (fs->flow_type & FLOW_RSS)
+ act->queue.ctx = input->rss_ctx;
+
+ act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ break;
+ }
+
+ return flow;
+}
+EXPORT_SYMBOL(ethtool_rx_flow_rule_create);
+
+void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow)
+{
+ kfree(flow->rule);
+ kfree(flow);
+}
+EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy);
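
A hedged usage sketch: a driver's ethtool RX classification path could translate an ethtool_rx_flow_spec into a flow_rule with the new helper and feed it to the same parser it already uses for cls_flower offloads. The example_priv structure and example_parse_flow_rule() hook are assumptions (the latter is sketched after net/core/flow_offload.c below); ethtool_rx_flow_rule_create(), ethtool_rx_flow_rule_destroy() and struct ethtool_rx_flow_spec_input come from this patch.

#include <linux/ethtool.h>
#include <net/flow_offload.h>

struct example_priv;					/* assumed driver state */
int example_parse_flow_rule(struct example_priv *priv,
			    struct flow_rule *rule);	/* assumed driver hook */

static int example_add_ethtool_rule(struct example_priv *priv,
				    struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {
		.fs = fs,
		/* .rss_ctx would be filled in when FLOW_RSS is set */
	};
	struct ethtool_rx_flow_rule *flow;
	int err;

	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	/* flow->rule is a struct flow_rule, i.e. the same representation
	 * the driver already handles for TC_SETUP_CLSFLOWER.
	 */
	err = example_parse_flow_rule(priv, flow->rule);

	ethtool_rx_flow_rule_destroy(flow);
	return err;
}
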
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
new file mode 100644
index 000000000000..c3a00eac4804
--- /dev/null
+++ b/net/core/flow_offload.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <net/flow_offload.h>
+
+struct flow_rule *flow_rule_alloc(unsigned int num_actions)
+{
+ struct flow_rule *rule;
+
+ rule = kzalloc(sizeof(struct flow_rule) +
+ sizeof(struct flow_action_entry) * num_actions,
+ GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ rule->action.num_entries = num_actions;
+
+ return rule;
+}
+EXPORT_SYMBOL(flow_rule_alloc);
+
+#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
+ const struct flow_match *__m = &(__rule)->match; \
+ struct flow_dissector *__d = (__m)->dissector; \
+ \
+ (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \
+ (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \
+
+void flow_rule_match_basic(const struct flow_rule *rule,
+ struct flow_match_basic *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
+}
+EXPORT_SYMBOL(flow_rule_match_basic);
+
+void flow_rule_match_control(const struct flow_rule *rule,
+ struct flow_match_control *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
+}
+EXPORT_SYMBOL(flow_rule_match_control);
+
+void flow_rule_match_eth_addrs(const struct flow_rule *rule,
+ struct flow_match_eth_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_eth_addrs);
+
+void flow_rule_match_vlan(const struct flow_rule *rule,
+ struct flow_match_vlan *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
+}
+EXPORT_SYMBOL(flow_rule_match_vlan);
+
+void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);
+
+void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);
+
+void flow_rule_match_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ip);
+
+void flow_rule_match_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ports);
+
+void flow_rule_match_tcp(const struct flow_rule *rule,
+ struct flow_match_tcp *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_tcp);
+
+void flow_rule_match_icmp(const struct flow_rule *rule,
+ struct flow_match_icmp *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_icmp);
+
+void flow_rule_match_mpls(const struct flow_rule *rule,
+ struct flow_match_mpls *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_mpls);
+
+void flow_rule_match_enc_control(const struct flow_rule *rule,
+ struct flow_match_control *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_control);
+
+void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv4_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);
+
+void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
+ struct flow_match_ipv6_addrs *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);
+
+void flow_rule_match_enc_ip(const struct flow_rule *rule,
+ struct flow_match_ip *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ip);
+
+void flow_rule_match_enc_ports(const struct flow_rule *rule,
+ struct flow_match_ports *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_ports);
+
+void flow_rule_match_enc_keyid(const struct flow_rule *rule,
+ struct flow_match_enc_keyid *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_keyid);
+
+void flow_rule_match_enc_opts(const struct flow_rule *rule,
+ struct flow_match_enc_opts *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
+}
+EXPORT_SYMBOL(flow_rule_match_enc_opts);
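
For illustration, a minimal sketch of the hypothetical example_parse_flow_rule() referenced in the ethtool example above, showing how a driver uses the flow_rule_match_*() accessors instead of calling skb_flow_dissector_target() by hand; example_priv, example_hw_set_ipv4() and the flow_rule_match_key() check (assumed to be provided by the new flow_offload.h header) are not part of this patch.

#include <net/flow_offload.h>

struct example_priv;					/* assumed driver state */
void example_hw_set_ipv4(struct example_priv *priv,
			 const struct flow_match_ipv4_addrs *m); /* assumed hw hook */

int example_parse_flow_rule(struct example_priv *priv, struct flow_rule *rule)
{
	struct flow_match_ipv4_addrs addrs;
	struct flow_match_basic basic;

	flow_rule_match_basic(rule, &basic);
	if (basic.key->n_proto != htons(ETH_P_IP))
		return -EOPNOTSUPP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &addrs);
		/* program addrs.key->src/dst, masked by addrs.mask, into hw */
		example_hw_set_ipv4(priv, &addrs);
	}

	return 0;
}
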
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index ff9fd2bb4ce4..7c5061123ead 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -12,7 +12,6 @@
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
@@ -501,16 +500,11 @@ static ssize_t phys_switch_id_show(struct device *dev,
return restart_syscall();
if (dev_isalive(netdev)) {
- struct switchdev_attr attr = {
- .orig_dev = netdev,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- .flags = SWITCHDEV_F_NO_RECURSE,
- };
+ struct netdev_phys_item_id ppid = { };
- ret = switchdev_port_attr_get(netdev, &attr);
+ ret = dev_get_port_parent_id(netdev, &ppid, false);
if (!ret)
- ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
- attr.u.ppid.id);
+ ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
}
rtnl_unlock();
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f5a98082ac7a..a51cab95ba64 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -46,7 +46,6 @@
#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <net/switchdev.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
@@ -1146,22 +1145,17 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
+ struct netdev_phys_item_id ppid = { };
int err;
- struct switchdev_attr attr = {
- .orig_dev = dev,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- .flags = SWITCHDEV_F_NO_RECURSE,
- };
- err = switchdev_port_attr_get(dev, &attr);
+ err = dev_get_port_parent_id(dev, &ppid, false);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
return err;
}
- if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
- attr.u.ppid.id))
+ if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
return -EMSGSIZE;
return 0;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 91de3a663226..70395a0ae52e 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -362,18 +362,23 @@ static int dsa_slave_port_obj_del(struct net_device *dev,
return err;
}
-static int dsa_slave_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
+static int dsa_slave_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct dsa_switch_tree *dst = ds->dst;
+ ppid->id_len = sizeof(dst->index);
+ memcpy(&ppid->id, &dst->index, ppid->id_len);
+
+ return 0;
+}
+
+static int dsa_slave_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(dst->index);
- memcpy(&attr->u.ppid.id, &dst->index, attr->u.ppid.id_len);
- break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
attr->u.brport_flags_support = 0;
break;
@@ -1046,6 +1051,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
.ndo_setup_tc = dsa_slave_setup_tc,
.ndo_get_stats64 = dsa_slave_get_stats64,
+ .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
};
static const struct switchdev_ops dsa_slave_switchdev_ops = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index fb99002c3d4e..e536970557dd 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -67,7 +67,6 @@
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
-#include <net/switchdev.h>
#include <linux/nospec.h>
@@ -837,10 +836,8 @@ static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
static int vif_add(struct net *net, struct mr_table *mrt,
struct vifctl *vifc, int mrtsock)
{
+ struct netdev_phys_item_id ppid = { };
int vifi = vifc->vifc_vifi;
- struct switchdev_attr attr = {
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
struct vif_device *v = &mrt->vif_table[vifi];
struct net_device *dev;
struct in_device *in_dev;
@@ -919,10 +916,10 @@ static int vif_add(struct net *net, struct mr_table *mrt,
vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
(VIFF_TUNNEL | VIFF_REGISTER));
- attr.orig_dev = dev;
- if (!switchdev_port_attr_get(dev, &attr)) {
- memcpy(v->dev_parent_id.id, attr.u.ppid.id, attr.u.ppid.id_len);
- v->dev_parent_id.id_len = attr.u.ppid.id_len;
+ err = dev_get_port_parent_id(dev, &ppid, true);
+ if (err == 0) {
+ memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
+ v->dev_parent_id.id_len = ppid.id_len;
} else {
v->dev_parent_id.id_len = 0;
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index e2b5cb2eb34e..02cf6d2fa0e1 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -31,6 +31,14 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
+#include <net/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
+#include <net/tc_act/tc_csum.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_skbedit.h>
extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
@@ -2515,6 +2523,114 @@ int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
}
EXPORT_SYMBOL(tc_setup_cb_call);
+int tc_setup_flow_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts)
+{
+ const struct tc_action *act;
+ int i, j, k;
+
+ if (!exts)
+ return 0;
+
+ j = 0;
+ tcf_exts_for_each_action(i, act, exts) {
+ struct flow_action_entry *entry;
+
+ entry = &flow_action->entries[j];
+ if (is_tcf_gact_ok(act)) {
+ entry->id = FLOW_ACTION_ACCEPT;
+ } else if (is_tcf_gact_shot(act)) {
+ entry->id = FLOW_ACTION_DROP;
+ } else if (is_tcf_gact_trap(act)) {
+ entry->id = FLOW_ACTION_TRAP;
+ } else if (is_tcf_gact_goto_chain(act)) {
+ entry->id = FLOW_ACTION_GOTO;
+ entry->chain_index = tcf_gact_goto_chain_index(act);
+ } else if (is_tcf_mirred_egress_redirect(act)) {
+ entry->id = FLOW_ACTION_REDIRECT;
+ entry->dev = tcf_mirred_dev(act);
+ } else if (is_tcf_mirred_egress_mirror(act)) {
+ entry->id = FLOW_ACTION_MIRRED;
+ entry->dev = tcf_mirred_dev(act);
+ } else if (is_tcf_vlan(act)) {
+ switch (tcf_vlan_action(act)) {
+ case TCA_VLAN_ACT_PUSH:
+ entry->id = FLOW_ACTION_VLAN_PUSH;
+ entry->vlan.vid = tcf_vlan_push_vid(act);
+ entry->vlan.proto = tcf_vlan_push_proto(act);
+ entry->vlan.prio = tcf_vlan_push_prio(act);
+ break;
+ case TCA_VLAN_ACT_POP:
+ entry->id = FLOW_ACTION_VLAN_POP;
+ break;
+ case TCA_VLAN_ACT_MODIFY:
+ entry->id = FLOW_ACTION_VLAN_MANGLE;
+ entry->vlan.vid = tcf_vlan_push_vid(act);
+ entry->vlan.proto = tcf_vlan_push_proto(act);
+ entry->vlan.prio = tcf_vlan_push_prio(act);
+ break;
+ default:
+ goto err_out;
+ }
+ } else if (is_tcf_tunnel_set(act)) {
+ entry->id = FLOW_ACTION_TUNNEL_ENCAP;
+ entry->tunnel = tcf_tunnel_info(act);
+ } else if (is_tcf_tunnel_release(act)) {
+ entry->id = FLOW_ACTION_TUNNEL_DECAP;
+ entry->tunnel = tcf_tunnel_info(act);
+ } else if (is_tcf_pedit(act)) {
+ for (k = 0; k < tcf_pedit_nkeys(act); k++) {
+ switch (tcf_pedit_cmd(act, k)) {
+ case TCA_PEDIT_KEY_EX_CMD_SET:
+ entry->id = FLOW_ACTION_MANGLE;
+ break;
+ case TCA_PEDIT_KEY_EX_CMD_ADD:
+ entry->id = FLOW_ACTION_ADD;
+ break;
+ default:
+ goto err_out;
+ }
+ entry->mangle.htype = tcf_pedit_htype(act, k);
+ entry->mangle.mask = tcf_pedit_mask(act, k);
+ entry->mangle.val = tcf_pedit_val(act, k);
+ entry->mangle.offset = tcf_pedit_offset(act, k);
+ entry = &flow_action->entries[++j];
+ }
+ } else if (is_tcf_csum(act)) {
+ entry->id = FLOW_ACTION_CSUM;
+ entry->csum_flags = tcf_csum_update_flags(act);
+ } else if (is_tcf_skbedit_mark(act)) {
+ entry->id = FLOW_ACTION_MARK;
+ entry->mark = tcf_skbedit_mark(act);
+ } else {
+ goto err_out;
+ }
+
+ if (!is_tcf_pedit(act))
+ j++;
+ }
+ return 0;
+err_out:
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(tc_setup_flow_action);
+
+unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
+{
+ unsigned int num_acts = 0;
+ struct tc_action *act;
+ int i;
+
+ tcf_exts_for_each_action(i, act, exts) {
+ if (is_tcf_pedit(act))
+ num_acts += tcf_pedit_nkeys(act);
+ else
+ num_acts++;
+ }
+ return num_acts;
+}
+EXPORT_SYMBOL(tcf_exts_num_actions);
+
static __net_init int tcf_net_init(struct net *net)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
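
A hedged sketch of the action side: after tc_setup_flow_action() has filled in the flow_action array, a driver walks the entries and programs its hardware. The example_hw_*() hooks and example_priv are assumptions; the entry layout (id, dev) matches the driver conversions elsewhere in this series.

#include <net/flow_offload.h>

struct example_priv;					/* assumed driver state */
void example_hw_add_drop(struct example_priv *priv);	/* assumed hw hook */
void example_hw_add_redirect(struct example_priv *priv,
			     struct net_device *dev);	/* assumed hw hook */

static int example_parse_actions(struct example_priv *priv,
				 struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	for (i = 0; i < flow_action->num_entries; i++) {
		entry = &flow_action->entries[i];

		switch (entry->id) {
		case FLOW_ACTION_DROP:
			example_hw_add_drop(priv);
			break;
		case FLOW_ACTION_REDIRECT:
			example_hw_add_redirect(priv, entry->dev);
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
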
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index f6aa57fbbbaf..c5d1db3a3db7 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -381,16 +381,27 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
bool skip_sw = tc_skip_sw(f->flags);
int err;
+ cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+ if (!cls_flower.rule)
+ return -ENOMEM;
+
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
cls_flower.command = TC_CLSFLOWER_REPLACE;
cls_flower.cookie = (unsigned long) f;
- cls_flower.dissector = &f->mask->dissector;
- cls_flower.mask = &f->mask->key;
- cls_flower.key = &f->mkey;
- cls_flower.exts = &f->exts;
+ cls_flower.rule->match.dissector = &f->mask->dissector;
+ cls_flower.rule->match.mask = &f->mask->key;
+ cls_flower.rule->match.key = &f->mkey;
cls_flower.classid = f->res.classid;
+ err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+ if (err) {
+ kfree(cls_flower.rule);
+ return err;
+ }
+
err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
+ kfree(cls_flower.rule);
+
if (err < 0) {
fl_hw_destroy_filter(tp, f, NULL);
return err;
@@ -413,10 +424,13 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f;
- cls_flower.exts = &f->exts;
cls_flower.classid = f->res.classid;
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
+
+ tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
+ cls_flower.stats.pkts,
+ cls_flower.stats.lastused);
}
static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
@@ -1463,18 +1477,32 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
if (tc_skip_hw(f->flags))
continue;
+ cls_flower.rule =
+ flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+ if (!cls_flower.rule)
+ return -ENOMEM;
+
tc_cls_common_offload_init(&cls_flower.common, tp,
f->flags, extack);
cls_flower.command = add ?
TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long)f;
- cls_flower.dissector = &mask->dissector;
- cls_flower.mask = &mask->key;
- cls_flower.key = &f->mkey;
- cls_flower.exts = &f->exts;
+ cls_flower.rule->match.dissector = &mask->dissector;
+ cls_flower.rule->match.mask = &mask->key;
+ cls_flower.rule->match.key = &f->mkey;
+
+ err = tc_setup_flow_action(&cls_flower.rule->action,
+ &f->exts);
+ if (err) {
+ kfree(cls_flower.rule);
+ return err;
+ }
+
cls_flower.classid = f->res.classid;
err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+ kfree(cls_flower.rule);
+
if (err) {
if (add && tc_skip_sw(f->flags))
return err;
@@ -1489,25 +1517,30 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
return 0;
}
-static void fl_hw_create_tmplt(struct tcf_chain *chain,
- struct fl_flow_tmplt *tmplt)
+static int fl_hw_create_tmplt(struct tcf_chain *chain,
+ struct fl_flow_tmplt *tmplt)
{
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = chain->block;
- struct tcf_exts dummy_exts = { 0, };
+
+ cls_flower.rule = flow_rule_alloc(0);
+ if (!cls_flower.rule)
+ return -ENOMEM;
cls_flower.common.chain_index = chain->index;
cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
cls_flower.cookie = (unsigned long) tmplt;
- cls_flower.dissector = &tmplt->dissector;
- cls_flower.mask = &tmplt->mask;
- cls_flower.key = &tmplt->dummy_key;
- cls_flower.exts = &dummy_exts;
+ cls_flower.rule->match.dissector = &tmplt->dissector;
+ cls_flower.rule->match.mask = &tmplt->mask;
+ cls_flower.rule->match.key = &tmplt->dummy_key;
/* We don't care if driver (any of them) fails to handle this
* call. It serves just as a hint for it.
*/
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
+ kfree(cls_flower.rule);
+
+ return 0;
}
static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
@@ -1551,12 +1584,14 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
if (err)
goto errout_tmplt;
- kfree(tb);
fl_init_dissector(&tmplt->dissector, &tmplt->mask);
- fl_hw_create_tmplt(chain, tmplt);
+ err = fl_hw_create_tmplt(chain, tmplt);
+ if (err)
+ goto errout_tmplt;
+ kfree(tb);
return tmplt;
errout_tmplt:
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index cd78253de31d..7e1357db33d7 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -592,26 +592,6 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
-bool switchdev_port_same_parent_id(struct net_device *a,
- struct net_device *b)
-{
- struct switchdev_attr a_attr = {
- .orig_dev = a,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
- struct switchdev_attr b_attr = {
- .orig_dev = b,
- .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
- };
-
- if (switchdev_port_attr_get(a, &a_attr) ||
- switchdev_port_attr_get(b, &b_attr))
- return false;
-
- return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
-}
-EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
-
static int __switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
diff --git a/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
new file mode 100755
index 000000000000..5ba5bef44d5b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
@@ -0,0 +1,200 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that blackhole routes are marked as offloaded and that packets hitting
+# them are dropped by the ASIC and not by the kernel.
+#
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | 2001:db8:2::2/64 |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ blackhole_ipv4
+ blackhole_ipv6
+"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/24 2001:db8:2::1/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 198.51.100.1/24 2001:db8:2::1/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ tc qdisc add dev $rp1 clsact
+
+ __addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del $rp2 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+router_destroy()
+{
+ __addr_add_del $rp2 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $rp1 clsact
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+ping_ipv4()
+{
+ ping_test $h1 198.51.100.1 ": h1->h2"
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::1 ": h1->h2"
+}
+
+blackhole_ipv4()
+{
+ # Transmit packets from H1 to H2 and make sure they are dropped by the
+ # ASIC and not by the kernel
+ RET=0
+
+ ip -4 route add blackhole 198.51.100.0/30
+ tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_hw dst_ip 198.51.100.1 src_ip 192.0.2.1 ip_proto icmp \
+ action pass
+
+ ip -4 route show 198.51.100.0/30 | grep -q offload
+ check_err $? "route not marked as offloaded when should"
+
+ ping_do $h1 198.51.100.1
+ check_fail $? "ping passed when should not"
+
+ tc_check_packets "dev $rp1 ingress" 101 0
+ check_err $? "packets trapped and not dropped by ASIC"
+
+ log_test "IPv4 blackhole route"
+
+ tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+ ip -4 route del blackhole 198.51.100.0/30
+}
+
+blackhole_ipv6()
+{
+ RET=0
+
+ ip -6 route add blackhole 2001:db8:2::/120
+ tc filter add dev $rp1 ingress protocol ipv6 pref 1 handle 101 flower \
+ skip_hw dst_ip 2001:db8:2::1 src_ip 2001:db8:1::1 \
+ ip_proto icmpv6 action pass
+
+ ip -6 route show 2001:db8:2::/120 | grep -q offload
+ check_err $? "route not marked as offloaded when should"
+
+ ping6_do $h1 2001:db8:2::1
+ check_fail $? "ping passed when should not"
+
+ tc_check_packets "dev $rp1 ingress" 101 0
+ check_err $? "packets trapped and not dropped by ASIC"
+
+ log_test "IPv6 blackhole route"
+
+ tc filter del dev $rp1 ingress protocol ipv6 pref 1 handle 101 flower
+ ip -6 route del blackhole 2001:db8:2::/120
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS